* [PATCH v2 01/10] tracing: Make tracing_disabled global for tracing system
2026-02-08 3:24 [PATCH v2 00/10] tracing: Clean up trace.c and move some code into other files Steven Rostedt
@ 2026-02-08 3:24 ` Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 02/10] tracing: Make tracing_selftest_running global to the tracing subsystem Steven Rostedt
` (8 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2026-02-08 3:24 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton
From: Steven Rostedt <rostedt@goodmis.org>
The tracing_disabled variable is set to one on boot up to prevent some
parts of tracing from accessing the tracing infrastructure before it is set up.
It also can be set after boot if an anomaly is discovered.
It is currently a static variable in trace.c and can be accessed via a
function call tracing_is_disabled(). There's really no reason to use a
function call as the tracing subsystem should be able to access it
directly.
By making the variable accessed directly, code can be moved out of trace.c
without adding overhead of a function call to see if tracing is disabled
or not.
Make tracing_disabled global and remove the tracing_is_disabled() helper
function. Also add some "unlikely()"s around tracing_disabled where it's
checked in hot paths.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
kernel/trace/trace.c | 13 ++++---------
kernel/trace/trace.h | 3 ++-
kernel/trace/trace_events.c | 2 +-
kernel/trace/trace_kprobe.c | 2 +-
4 files changed, 8 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d02c4004c718..1ff40c88e75c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -114,7 +114,7 @@ DEFINE_PER_CPU(bool, trace_taskinfo_save);
* of the tracer is successful. But that is the only place that sets
* this back to zero.
*/
-static int tracing_disabled = 1;
+int tracing_disabled = 1;
cpumask_var_t __read_mostly tracing_buffer_mask;
@@ -3423,7 +3423,7 @@ int __trace_array_vprintk(struct trace_buffer *buffer,
unsigned int trace_ctx;
char *tbuffer;
- if (tracing_disabled)
+ if (unlikely(tracing_disabled))
return 0;
/* Don't pollute graph traces with trace_vprintk internals */
@@ -4765,11 +4765,6 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
return 0;
}
-bool tracing_is_disabled(void)
-{
- return (tracing_disabled) ? true: false;
-}
-
/*
* Open and update trace_array ref count.
* Must have the current trace_array passed to it.
@@ -7609,7 +7604,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
unsigned long ip;
char *buf;
- if (tracing_disabled)
+ if (unlikely(tracing_disabled))
return -EINVAL;
if (!(tr->trace_flags & TRACE_ITER(MARKERS)))
@@ -7689,7 +7684,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
ssize_t written = -ENODEV;
char *buf;
- if (tracing_disabled)
+ if (unlikely(tracing_disabled))
return -EINVAL;
if (!(tr->trace_flags & TRACE_ITER(MARKERS)))
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 31fb137e1c66..433705bef480 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -657,6 +657,8 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}
+extern int tracing_disabled;
+
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct array_buffer *buf);
@@ -668,7 +670,6 @@ int tracing_release_generic_tr(struct inode *inode, struct file *file);
int tracing_open_file_tr(struct inode *inode, struct file *filp);
int tracing_release_file_tr(struct inode *inode, struct file *filp);
int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
-bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index af6d1fe5cab7..61fe01dce7a6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2268,7 +2268,7 @@ static int subsystem_open(struct inode *inode, struct file *filp)
struct event_subsystem *system = NULL;
int ret;
- if (tracing_is_disabled())
+ if (unlikely(tracing_disabled))
return -ENODEV;
/* Make sure the system still exists */
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 89d2740f7bb5..061658518605 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -2083,7 +2083,7 @@ static __init int kprobe_trace_self_tests_init(void)
struct trace_kprobe *tk;
struct trace_event_file *file;
- if (tracing_is_disabled())
+ if (unlikely(tracing_disabled))
return -ENODEV;
if (tracing_selftest_disabled)
--
2.51.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 02/10] tracing: Make tracing_selftest_running global to the tracing subsystem
2026-02-08 3:24 [PATCH v2 00/10] tracing: Clean up trace.c and move some code into other files Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 01/10] tracing: Make tracing_disabled global for tracing system Steven Rostedt
@ 2026-02-08 3:24 ` Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 03/10] tracing: Move __trace_buffer_{un}lock_*() functions to trace.h Steven Rostedt
` (7 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2026-02-08 3:24 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton
From: Steven Rostedt <rostedt@goodmis.org>
The file trace.c has become a catchall for most things tracing. Start
making it smaller by breaking out various aspects into their own files.
Make the variable tracing_selftest_running global so that it can be used
by other files in the tracing subsystem and trace.c can be split up.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
kernel/trace/trace.c | 3 +--
kernel/trace/trace.h | 2 ++
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1ff40c88e75c..f040ee4fe101 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -67,7 +67,7 @@
* insertions into the ring-buffer such as trace_printk could occurred
* at the same time, giving false positive or negative results.
*/
-static bool __read_mostly tracing_selftest_running;
+bool __read_mostly tracing_selftest_running;
/*
* If boot-time tracing including tracers/events via kernel cmdline
@@ -83,7 +83,6 @@ void __init disable_tracing_selftest(const char *reason)
}
}
#else
-#define tracing_selftest_running 0
#define tracing_selftest_disabled 0
#endif
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 433705bef480..19cffc7b5852 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -863,6 +863,7 @@ extern int trace_selftest_startup_nop(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
struct trace_array *tr);
+extern bool __read_mostly tracing_selftest_running;
/*
* Tracer data references selftest functions that only occur
* on boot up. These can be __init functions. Thus, when selftests
@@ -875,6 +876,7 @@ static inline void __init disable_tracing_selftest(const char *reason)
}
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
+#define tracing_selftest_running 0
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
--
2.51.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 03/10] tracing: Move __trace_buffer_{un}lock_*() functions to trace.h
2026-02-08 3:24 [PATCH v2 00/10] tracing: Clean up trace.c and move some code into other files Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 01/10] tracing: Make tracing_disabled global for tracing system Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 02/10] tracing: Make tracing_selftest_running global to the tracing subsystem Steven Rostedt
@ 2026-02-08 3:24 ` Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 04/10] tracing: Move ftrace_trace_stack() out of trace.c and into trace.h Steven Rostedt
` (6 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2026-02-08 3:24 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton
From: Steven Rostedt <rostedt@goodmis.org>
The file trace.c has become a catchall for most things tracing. Start
making it smaller by breaking out various aspects into their own files.
Move the __always_inline functions __trace_buffer_lock_reserve(),
__buffer_unlock_commit() and trace_event_setup() into trace.h.
The trace.c file will be split up and these functions will be used in more
than one of these files. As they are already __always_inline they can
easily be moved into the trace.h header file.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
kernel/trace/trace.c | 42 ------------------------------------------
kernel/trace/trace.h | 41 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 41 insertions(+), 42 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f040ee4fe101..55cd0c774886 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1058,30 +1058,6 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
#endif
-static __always_inline void
-trace_event_setup(struct ring_buffer_event *event,
- int type, unsigned int trace_ctx)
-{
- struct trace_entry *ent = ring_buffer_event_data(event);
-
- tracing_generic_entry_update(ent, type, trace_ctx);
-}
-
-static __always_inline struct ring_buffer_event *
-__trace_buffer_lock_reserve(struct trace_buffer *buffer,
- int type,
- unsigned long len,
- unsigned int trace_ctx)
-{
- struct ring_buffer_event *event;
-
- event = ring_buffer_lock_reserve(buffer, len);
- if (event != NULL)
- trace_event_setup(event, type, trace_ctx);
-
- return event;
-}
-
void tracer_tracing_on(struct trace_array *tr)
{
if (tr->array_buffer.buffer)
@@ -1109,24 +1085,6 @@ void tracing_on(void)
}
EXPORT_SYMBOL_GPL(tracing_on);
-
-static __always_inline void
-__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
-{
- __this_cpu_write(trace_taskinfo_save, true);
-
- /* If this is the temp buffer, we need to commit fully */
- if (this_cpu_read(trace_buffered_event) == event) {
- /* Length is in event->array[0] */
- ring_buffer_write(buffer, event->array[0], &event->array[1]);
- /* Release the temp buffer */
- this_cpu_dec(trace_buffered_event_cnt);
- /* ring_buffer_unlock_commit() enables preemption */
- preempt_enable_notrace();
- } else
- ring_buffer_unlock_commit(buffer);
-}
-
int __trace_array_puts(struct trace_array *tr, unsigned long ip,
const char *str, int size)
{
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 19cffc7b5852..c2beabe96952 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1568,6 +1568,47 @@ char *trace_user_fault_read(struct trace_user_buf_info *tinfo,
const char __user *ptr, size_t size,
trace_user_buf_copy copy_func, void *data);
+static __always_inline void
+trace_event_setup(struct ring_buffer_event *event,
+ int type, unsigned int trace_ctx)
+{
+ struct trace_entry *ent = ring_buffer_event_data(event);
+
+ tracing_generic_entry_update(ent, type, trace_ctx);
+}
+
+static __always_inline struct ring_buffer_event *
+__trace_buffer_lock_reserve(struct trace_buffer *buffer,
+ int type,
+ unsigned long len,
+ unsigned int trace_ctx)
+{
+ struct ring_buffer_event *event;
+
+ event = ring_buffer_lock_reserve(buffer, len);
+ if (event != NULL)
+ trace_event_setup(event, type, trace_ctx);
+
+ return event;
+}
+
+static __always_inline void
+__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
+{
+ __this_cpu_write(trace_taskinfo_save, true);
+
+ /* If this is the temp buffer, we need to commit fully */
+ if (this_cpu_read(trace_buffered_event) == event) {
+ /* Length is in event->array[0] */
+ ring_buffer_write(buffer, event->array[0], &event->array[1]);
+ /* Release the temp buffer */
+ this_cpu_dec(trace_buffered_event_cnt);
+ /* ring_buffer_unlock_commit() enables preemption */
+ preempt_enable_notrace();
+ } else
+ ring_buffer_unlock_commit(buffer);
+}
+
static inline void
__trace_event_discard_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event)
--
2.51.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 04/10] tracing: Move ftrace_trace_stack() out of trace.c and into trace.h
2026-02-08 3:24 [PATCH v2 00/10] tracing: Clean up trace.c and move some code into other files Steven Rostedt
` (2 preceding siblings ...)
2026-02-08 3:24 ` [PATCH v2 03/10] tracing: Move __trace_buffer_{un}lock_*() functions to trace.h Steven Rostedt
@ 2026-02-08 3:24 ` Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 05/10] tracing: Make printk_trace global for tracing system Steven Rostedt
` (5 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2026-02-08 3:24 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton
From: Steven Rostedt <rostedt@goodmis.org>
The file trace.c has become a catchall for most things tracing. Start
making it smaller by breaking out various aspects into their own files.
Make ftrace_trace_stack() into a static inline that tests if stack tracing
is enabled and if so to call __ftrace_trace_stack() to do the stack trace.
This keeps the test inlined in the fast paths and only does the function
call if stack tracing is enabled.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
kernel/trace/trace.c | 45 ++++----------------------------------------
kernel/trace/trace.h | 31 ++++++++++++++++++++++++++++++
2 files changed, 35 insertions(+), 41 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 55cd0c774886..a515b5241391 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1032,32 +1032,6 @@ static inline void trace_access_lock_init(void)
#endif
-#ifdef CONFIG_STACKTRACE
-static void __ftrace_trace_stack(struct trace_array *tr,
- struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs);
-static inline void ftrace_trace_stack(struct trace_array *tr,
- struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs);
-
-#else
-static inline void __ftrace_trace_stack(struct trace_array *tr,
- struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs)
-{
-}
-static inline void ftrace_trace_stack(struct trace_array *tr,
- struct trace_buffer *buffer,
- unsigned long trace_ctx,
- int skip, struct pt_regs *regs)
-{
-}
-
-#endif
-
void tracer_tracing_on(struct trace_array *tr)
{
if (tr->array_buffer.buffer)
@@ -2964,10 +2938,10 @@ struct ftrace_stacks {
static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
-static void __ftrace_trace_stack(struct trace_array *tr,
- struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs)
+void __ftrace_trace_stack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs)
{
struct ring_buffer_event *event;
unsigned int size, nr_entries;
@@ -3050,17 +3024,6 @@ static void __ftrace_trace_stack(struct trace_array *tr,
trace_clear_recursion(bit);
}
-static inline void ftrace_trace_stack(struct trace_array *tr,
- struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs)
-{
- if (!(tr->trace_flags & TRACE_ITER(STACKTRACE)))
- return;
-
- __ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs);
-}
-
void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
int skip)
{
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c2beabe96952..605ee23f3262 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -2279,6 +2279,37 @@ static inline void sanitize_event_name(char *name)
*name = '_';
}
+#ifdef CONFIG_STACKTRACE
+void __ftrace_trace_stack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs);
+
+static __always_inline void ftrace_trace_stack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs)
+{
+ if (!(tr->trace_flags & TRACE_ITER(STACKTRACE)))
+ return;
+
+ __ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs);
+}
+#else
+static inline void __ftrace_trace_stack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs)
+{
+}
+static inline void ftrace_trace_stack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+ unsigned long trace_ctx,
+ int skip, struct pt_regs *regs)
+{
+}
+#endif
+
/*
* This is a generic way to read and write a u64 value from a file in tracefs.
*
--
2.51.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 05/10] tracing: Make printk_trace global for tracing system
2026-02-08 3:24 [PATCH v2 00/10] tracing: Clean up trace.c and move some code into other files Steven Rostedt
` (3 preceding siblings ...)
2026-02-08 3:24 ` [PATCH v2 04/10] tracing: Move ftrace_trace_stack() out of trace.c and into trace.h Steven Rostedt
@ 2026-02-08 3:24 ` Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 06/10] tracing: Make tracing_update_buffers() take NULL for global_trace Steven Rostedt
` (4 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2026-02-08 3:24 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton
From: Steven Rostedt <rostedt@goodmis.org>
The printk_trace is used to determine which trace_array trace_printk()
writes to. By making it a global variable among the tracing subsystem it
will allow the trace_printk functions to be moved out of trace.c and still
have direct access to that variable.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
kernel/trace/trace.c | 2 +-
kernel/trace/trace.h | 2 ++
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a515b5241391..4a73822e2603 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -534,7 +534,7 @@ static struct trace_array global_trace = {
.trace_flags = TRACE_DEFAULT_FLAGS,
};
-static struct trace_array *printk_trace = &global_trace;
+struct trace_array *printk_trace = &global_trace;
/* List of trace_arrays interested in the top level trace_marker */
static LIST_HEAD(marker_copies);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 605ee23f3262..921e4daa2825 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -482,6 +482,8 @@ extern bool trace_clock_in_ns(struct trace_array *tr);
extern unsigned long trace_adjust_address(struct trace_array *tr, unsigned long addr);
+extern struct trace_array *printk_trace;
+
/*
* The global tracer (top) should be the first trace array added,
* but we check the flag anyway.
--
2.51.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 06/10] tracing: Make tracing_update_buffers() take NULL for global_trace
2026-02-08 3:24 [PATCH v2 00/10] tracing: Clean up trace.c and move some code into other files Steven Rostedt
` (4 preceding siblings ...)
2026-02-08 3:24 ` [PATCH v2 05/10] tracing: Make printk_trace global for tracing system Steven Rostedt
@ 2026-02-08 3:24 ` Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 07/10] tracing: Have trace_printk functions use flags instead of using global_trace Steven Rostedt
` (3 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2026-02-08 3:24 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton
From: Steven Rostedt <rostedt@goodmis.org>
The trace.c file has become a dumping ground for all tracing code and has
become quite large. In order to move the trace_printk functions out of it
these functions can not access global_trace directly, as that is something
that needs to stay static in trace.c.
Having tracing_update_buffers() take NULL for its trace_array to denote that
it should work on the global_trace top level trace_array allows that function
to be used outside of trace.c and still update the global_trace
trace_array.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
kernel/trace/trace.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4a73822e2603..601b6f622391 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3234,7 +3234,7 @@ void trace_printk_init_buffers(void)
pr_warn("**********************************************************\n");
/* Expand the buffers to set size */
- if (tracing_update_buffers(&global_trace) < 0)
+ if (tracing_update_buffers(NULL) < 0)
pr_err("Failed to expand tracing buffers for trace_printk() calls\n");
else
buffers_allocated = 1;
@@ -6186,6 +6186,9 @@ int tracing_update_buffers(struct trace_array *tr)
{
int ret = 0;
+ if (!tr)
+ tr = &global_trace;
+
guard(mutex)(&trace_types_lock);
update_last_data(tr);
--
2.51.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 07/10] tracing: Have trace_printk functions use flags instead of using global_trace
2026-02-08 3:24 [PATCH v2 00/10] tracing: Clean up trace.c and move some code into other files Steven Rostedt
` (5 preceding siblings ...)
2026-02-08 3:24 ` [PATCH v2 06/10] tracing: Make tracing_update_buffers() take NULL for global_trace Steven Rostedt
@ 2026-02-08 3:24 ` Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 08/10] tracing: Use system_state in trace_printk_init_buffers() Steven Rostedt
` (2 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2026-02-08 3:24 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton
From: Steven Rostedt <rostedt@goodmis.org>
The trace.c file has become a dumping ground for all tracing code and has
become quite large. In order to move the trace_printk functions out of it
these functions can not access global_trace directly, as that is something
that needs to stay static in trace.c.
Instead of testing the trace_array tr pointer against &global_trace, test
tr->flags to see if TRACE_ARRAY_FL_GLOBAL is set.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
kernel/trace/trace.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 601b6f622391..f4ae80564615 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1071,7 +1071,8 @@ int __trace_array_puts(struct trace_array *tr, unsigned long ip,
if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
return 0;
- if (unlikely(tracing_selftest_running && tr == &global_trace))
+ if (unlikely(tracing_selftest_running &&
+ (tr->flags & TRACE_ARRAY_FL_GLOBAL)))
return 0;
if (unlikely(tracing_disabled))
@@ -3386,7 +3387,7 @@ int __trace_array_vprintk(struct trace_buffer *buffer,
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
- if (tracing_selftest_running && tr == &global_trace)
+ if (tracing_selftest_running && (tr->flags & TRACE_ARRAY_FL_GLOBAL))
return 0;
return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
@@ -3422,7 +3423,7 @@ int trace_array_printk(struct trace_array *tr,
return -ENOENT;
/* This is only allowed for created instances */
- if (tr == &global_trace)
+ if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
return 0;
if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
@@ -3449,7 +3450,7 @@ int trace_array_init_printk(struct trace_array *tr)
return -ENOENT;
/* This is only allowed for created instances */
- if (tr == &global_trace)
+ if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
return -EINVAL;
return alloc_percpu_trace_buffer();
--
2.51.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 08/10] tracing: Use system_state in trace_printk_init_buffers()
2026-02-08 3:24 [PATCH v2 00/10] tracing: Clean up trace.c and move some code into other files Steven Rostedt
` (6 preceding siblings ...)
2026-02-08 3:24 ` [PATCH v2 07/10] tracing: Have trace_printk functions use flags instead of using global_trace Steven Rostedt
@ 2026-02-08 3:24 ` Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 09/10] tracing: Move trace_printk functions out of trace.c and into trace_printk.c Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 10/10] tracing: Move pid filtering into trace_pid.c Steven Rostedt
9 siblings, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2026-02-08 3:24 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton
From: Steven Rostedt <rostedt@goodmis.org>
The function trace_printk_init_buffers() is used to expand the
trace_printk buffers when trace_printk() is used within the kernel or in
modules. On kernel boot up, it holds off from starting the sched switch
cmdline recorder, but will start it immediately when it is added by a
module.
Currently it uses a trick to see if the global_trace buffer has been
allocated or not to know if it was called by module load or not. But this
is more of a hack, and cannot be used when this code is moved out of
trace.c. Instead simply look at the system_state and if it is running then
it is known that it could only have been called by module load.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
kernel/trace/trace.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f4ae80564615..4066c33674e7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3243,10 +3243,9 @@ void trace_printk_init_buffers(void)
/*
* trace_printk_init_buffers() can be called by modules.
* If that happens, then we need to start cmdline recording
- * directly here. If the global_trace.buffer is already
- * allocated here, then this was called by module code.
+ * directly here.
*/
- if (global_trace.array_buffer.buffer)
+ if (system_state == SYSTEM_RUNNING)
tracing_start_cmdline_record();
}
EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
--
2.51.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 09/10] tracing: Move trace_printk functions out of trace.c and into trace_printk.c
2026-02-08 3:24 [PATCH v2 00/10] tracing: Clean up trace.c and move some code into other files Steven Rostedt
` (7 preceding siblings ...)
2026-02-08 3:24 ` [PATCH v2 08/10] tracing: Use system_state in trace_printk_init_buffers() Steven Rostedt
@ 2026-02-08 3:24 ` Steven Rostedt
2026-02-08 3:24 ` [PATCH v2 10/10] tracing: Move pid filtering into trace_pid.c Steven Rostedt
9 siblings, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2026-02-08 3:24 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton
From: Steven Rostedt <rostedt@goodmis.org>
The file trace.c has become a catchall for most things tracing. Start
making it smaller by breaking out various aspects into their own files.
Move the functions associated to the trace_printk operations out of trace.c and
into trace_printk.c.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
kernel/trace/trace.c | 431 ------------------------------------
kernel/trace/trace.h | 1 +
kernel/trace/trace_printk.c | 431 ++++++++++++++++++++++++++++++++++++
3 files changed, 432 insertions(+), 431 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4066c33674e7..5812b830c1fa 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -539,17 +539,6 @@ struct trace_array *printk_trace = &global_trace;
/* List of trace_arrays interested in the top level trace_marker */
static LIST_HEAD(marker_copies);
-static __always_inline bool printk_binsafe(struct trace_array *tr)
-{
- /*
- * The binary format of traceprintk can cause a crash if used
- * by a buffer from another boot. Force the use of the
- * non binary version of trace_printk if the trace_printk
- * buffer is a boot mapped ring buffer.
- */
- return !(tr->flags & TRACE_ARRAY_FL_BOOT);
-}
-
static void update_printk_trace(struct trace_array *tr)
{
if (printk_trace == tr)
@@ -1059,108 +1048,6 @@ void tracing_on(void)
}
EXPORT_SYMBOL_GPL(tracing_on);
-int __trace_array_puts(struct trace_array *tr, unsigned long ip,
- const char *str, int size)
-{
- struct ring_buffer_event *event;
- struct trace_buffer *buffer;
- struct print_entry *entry;
- unsigned int trace_ctx;
- int alloc;
-
- if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
- return 0;
-
- if (unlikely(tracing_selftest_running &&
- (tr->flags & TRACE_ARRAY_FL_GLOBAL)))
- return 0;
-
- if (unlikely(tracing_disabled))
- return 0;
-
- alloc = sizeof(*entry) + size + 2; /* possible \n added */
-
- trace_ctx = tracing_gen_ctx();
- buffer = tr->array_buffer.buffer;
- guard(ring_buffer_nest)(buffer);
- event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
- trace_ctx);
- if (!event)
- return 0;
-
- entry = ring_buffer_event_data(event);
- entry->ip = ip;
-
- memcpy(&entry->buf, str, size);
-
- /* Add a newline if necessary */
- if (entry->buf[size - 1] != '\n') {
- entry->buf[size] = '\n';
- entry->buf[size + 1] = '\0';
- } else
- entry->buf[size] = '\0';
-
- __buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
- return size;
-}
-EXPORT_SYMBOL_GPL(__trace_array_puts);
-
-/**
- * __trace_puts - write a constant string into the trace buffer.
- * @ip: The address of the caller
- * @str: The constant string to write
- * @size: The size of the string.
- */
-int __trace_puts(unsigned long ip, const char *str, int size)
-{
- return __trace_array_puts(printk_trace, ip, str, size);
-}
-EXPORT_SYMBOL_GPL(__trace_puts);
-
-/**
- * __trace_bputs - write the pointer to a constant string into trace buffer
- * @ip: The address of the caller
- * @str: The constant string to write to the buffer to
- */
-int __trace_bputs(unsigned long ip, const char *str)
-{
- struct trace_array *tr = READ_ONCE(printk_trace);
- struct ring_buffer_event *event;
- struct trace_buffer *buffer;
- struct bputs_entry *entry;
- unsigned int trace_ctx;
- int size = sizeof(struct bputs_entry);
-
- if (!printk_binsafe(tr))
- return __trace_puts(ip, str, strlen(str));
-
- if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
- return 0;
-
- if (unlikely(tracing_selftest_running || tracing_disabled))
- return 0;
-
- trace_ctx = tracing_gen_ctx();
- buffer = tr->array_buffer.buffer;
-
- guard(ring_buffer_nest)(buffer);
- event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
- trace_ctx);
- if (!event)
- return 0;
-
- entry = ring_buffer_event_data(event);
- entry->ip = ip;
- entry->str = str;
-
- __buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(__trace_bputs);
-
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
void *cond_data)
@@ -3159,324 +3046,6 @@ void trace_last_func_repeats(struct trace_array *tr,
__buffer_unlock_commit(buffer, event);
}
-/* created for use with alloc_percpu */
-struct trace_buffer_struct {
- int nesting;
- char buffer[4][TRACE_BUF_SIZE];
-};
-
-static struct trace_buffer_struct __percpu *trace_percpu_buffer;
-
-/*
- * This allows for lockless recording. If we're nested too deeply, then
- * this returns NULL.
- */
-static char *get_trace_buf(void)
-{
- struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
-
- if (!trace_percpu_buffer || buffer->nesting >= 4)
- return NULL;
-
- buffer->nesting++;
-
- /* Interrupts must see nesting incremented before we use the buffer */
- barrier();
- return &buffer->buffer[buffer->nesting - 1][0];
-}
-
-static void put_trace_buf(void)
-{
- /* Don't let the decrement of nesting leak before this */
- barrier();
- this_cpu_dec(trace_percpu_buffer->nesting);
-}
-
-static int alloc_percpu_trace_buffer(void)
-{
- struct trace_buffer_struct __percpu *buffers;
-
- if (trace_percpu_buffer)
- return 0;
-
- buffers = alloc_percpu(struct trace_buffer_struct);
- if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
- return -ENOMEM;
-
- trace_percpu_buffer = buffers;
- return 0;
-}
-
-static int buffers_allocated;
-
-void trace_printk_init_buffers(void)
-{
- if (buffers_allocated)
- return;
-
- if (alloc_percpu_trace_buffer())
- return;
-
- /* trace_printk() is for debug use only. Don't use it in production. */
-
- pr_warn("\n");
- pr_warn("**********************************************************\n");
- pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
- pr_warn("** **\n");
- pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
- pr_warn("** **\n");
- pr_warn("** This means that this is a DEBUG kernel and it is **\n");
- pr_warn("** unsafe for production use. **\n");
- pr_warn("** **\n");
- pr_warn("** If you see this message and you are not debugging **\n");
- pr_warn("** the kernel, report this immediately to your vendor! **\n");
- pr_warn("** **\n");
- pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
- pr_warn("**********************************************************\n");
-
- /* Expand the buffers to set size */
- if (tracing_update_buffers(NULL) < 0)
- pr_err("Failed to expand tracing buffers for trace_printk() calls\n");
- else
- buffers_allocated = 1;
-
- /*
- * trace_printk_init_buffers() can be called by modules.
- * If that happens, then we need to start cmdline recording
- * directly here.
- */
- if (system_state == SYSTEM_RUNNING)
- tracing_start_cmdline_record();
-}
-EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
-
-void trace_printk_start_comm(void)
-{
- /* Start tracing comms if trace printk is set */
- if (!buffers_allocated)
- return;
- tracing_start_cmdline_record();
-}
-
-static void trace_printk_start_stop_comm(int enabled)
-{
- if (!buffers_allocated)
- return;
-
- if (enabled)
- tracing_start_cmdline_record();
- else
- tracing_stop_cmdline_record();
-}
-
-/**
- * trace_vbprintk - write binary msg to tracing buffer
- * @ip: The address of the caller
- * @fmt: The string format to write to the buffer
- * @args: Arguments for @fmt
- */
-int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
-{
- struct ring_buffer_event *event;
- struct trace_buffer *buffer;
- struct trace_array *tr = READ_ONCE(printk_trace);
- struct bprint_entry *entry;
- unsigned int trace_ctx;
- char *tbuffer;
- int len = 0, size;
-
- if (!printk_binsafe(tr))
- return trace_vprintk(ip, fmt, args);
-
- if (unlikely(tracing_selftest_running || tracing_disabled))
- return 0;
-
- /* Don't pollute graph traces with trace_vprintk internals */
- pause_graph_tracing();
-
- trace_ctx = tracing_gen_ctx();
- guard(preempt_notrace)();
-
- tbuffer = get_trace_buf();
- if (!tbuffer) {
- len = 0;
- goto out_nobuffer;
- }
-
- len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
-
- if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
- goto out_put;
-
- size = sizeof(*entry) + sizeof(u32) * len;
- buffer = tr->array_buffer.buffer;
- scoped_guard(ring_buffer_nest, buffer) {
- event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
- trace_ctx);
- if (!event)
- goto out_put;
- entry = ring_buffer_event_data(event);
- entry->ip = ip;
- entry->fmt = fmt;
-
- memcpy(entry->buf, tbuffer, sizeof(u32) * len);
- __buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
- }
-out_put:
- put_trace_buf();
-
-out_nobuffer:
- unpause_graph_tracing();
-
- return len;
-}
-EXPORT_SYMBOL_GPL(trace_vbprintk);
-
-static __printf(3, 0)
-int __trace_array_vprintk(struct trace_buffer *buffer,
- unsigned long ip, const char *fmt, va_list args)
-{
- struct ring_buffer_event *event;
- int len = 0, size;
- struct print_entry *entry;
- unsigned int trace_ctx;
- char *tbuffer;
-
- if (unlikely(tracing_disabled))
- return 0;
-
- /* Don't pollute graph traces with trace_vprintk internals */
- pause_graph_tracing();
-
- trace_ctx = tracing_gen_ctx();
- guard(preempt_notrace)();
-
-
- tbuffer = get_trace_buf();
- if (!tbuffer) {
- len = 0;
- goto out_nobuffer;
- }
-
- len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
-
- size = sizeof(*entry) + len + 1;
- scoped_guard(ring_buffer_nest, buffer) {
- event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
- trace_ctx);
- if (!event)
- goto out;
- entry = ring_buffer_event_data(event);
- entry->ip = ip;
-
- memcpy(&entry->buf, tbuffer, len + 1);
- __buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
- }
-out:
- put_trace_buf();
-
-out_nobuffer:
- unpause_graph_tracing();
-
- return len;
-}
-
-int trace_array_vprintk(struct trace_array *tr,
- unsigned long ip, const char *fmt, va_list args)
-{
- if (tracing_selftest_running && (tr->flags & TRACE_ARRAY_FL_GLOBAL))
- return 0;
-
- return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
-}
-
-/**
- * trace_array_printk - Print a message to a specific instance
- * @tr: The instance trace_array descriptor
- * @ip: The instruction pointer that this is called from.
- * @fmt: The format to print (printf format)
- *
- * If a subsystem sets up its own instance, they have the right to
- * printk strings into their tracing instance buffer using this
- * function. Note, this function will not write into the top level
- * buffer (use trace_printk() for that), as writing into the top level
- * buffer should only have events that can be individually disabled.
- * trace_printk() is only used for debugging a kernel, and should not
- * be ever incorporated in normal use.
- *
- * trace_array_printk() can be used, as it will not add noise to the
- * top level tracing buffer.
- *
- * Note, trace_array_init_printk() must be called on @tr before this
- * can be used.
- */
-int trace_array_printk(struct trace_array *tr,
- unsigned long ip, const char *fmt, ...)
-{
- int ret;
- va_list ap;
-
- if (!tr)
- return -ENOENT;
-
- /* This is only allowed for created instances */
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
- return 0;
-
- if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
- return 0;
-
- va_start(ap, fmt);
- ret = trace_array_vprintk(tr, ip, fmt, ap);
- va_end(ap);
- return ret;
-}
-EXPORT_SYMBOL_GPL(trace_array_printk);
-
-/**
- * trace_array_init_printk - Initialize buffers for trace_array_printk()
- * @tr: The trace array to initialize the buffers for
- *
- * As trace_array_printk() only writes into instances, they are OK to
- * have in the kernel (unlike trace_printk()). This needs to be called
- * before trace_array_printk() can be used on a trace_array.
- */
-int trace_array_init_printk(struct trace_array *tr)
-{
- if (!tr)
- return -ENOENT;
-
- /* This is only allowed for created instances */
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
- return -EINVAL;
-
- return alloc_percpu_trace_buffer();
-}
-EXPORT_SYMBOL_GPL(trace_array_init_printk);
-
-int trace_array_printk_buf(struct trace_buffer *buffer,
- unsigned long ip, const char *fmt, ...)
-{
- int ret;
- va_list ap;
-
- if (!(printk_trace->trace_flags & TRACE_ITER(PRINTK)))
- return 0;
-
- va_start(ap, fmt);
- ret = __trace_array_vprintk(buffer, ip, fmt, ap);
- va_end(ap);
- return ret;
-}
-
-int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
-{
- return trace_array_vprintk(printk_trace, ip, fmt, args);
-}
-EXPORT_SYMBOL_GPL(trace_vprintk);
-
static void trace_iterator_increment(struct trace_iterator *iter)
{
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 921e4daa2825..6b0fedf2f532 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -2131,6 +2131,7 @@ extern const char *__stop___tracepoint_str[];
void trace_printk_control(bool enabled);
void trace_printk_start_comm(void);
+void trace_printk_start_stop_comm(int enabled);
int trace_keep_overwrite(struct tracer *tracer, u64 mask, int set);
int set_tracer_flag(struct trace_array *tr, u64 mask, int enabled);
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 29f6e95439b6..c9cb74a33b3c 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -376,6 +376,437 @@ static const struct file_operations ftrace_formats_fops = {
.release = seq_release,
};
+static __always_inline bool printk_binsafe(struct trace_array *tr)
+{
+ /*
+ * The binary format of traceprintk can cause a crash if used
+ * by a buffer from another boot. Force the use of the
+ * non binary version of trace_printk if the trace_printk
+ * buffer is a boot mapped ring buffer.
+ */
+ return !(tr->flags & TRACE_ARRAY_FL_BOOT);
+}
+
+int __trace_array_puts(struct trace_array *tr, unsigned long ip,
+ const char *str, int size)
+{
+ struct ring_buffer_event *event;
+ struct trace_buffer *buffer;
+ struct print_entry *entry;
+ unsigned int trace_ctx;
+ int alloc;
+
+ if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
+ return 0;
+
+ if (unlikely(tracing_selftest_running &&
+ (tr->flags & TRACE_ARRAY_FL_GLOBAL)))
+ return 0;
+
+ if (unlikely(tracing_disabled))
+ return 0;
+
+ alloc = sizeof(*entry) + size + 2; /* possible \n added */
+
+ trace_ctx = tracing_gen_ctx();
+ buffer = tr->array_buffer.buffer;
+ guard(ring_buffer_nest)(buffer);
+ event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ trace_ctx);
+ if (!event)
+ return 0;
+
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+
+ memcpy(&entry->buf, str, size);
+
+ /* Add a newline if necessary */
+ if (entry->buf[size - 1] != '\n') {
+ entry->buf[size] = '\n';
+ entry->buf[size + 1] = '\0';
+ } else
+ entry->buf[size] = '\0';
+
+ __buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
+ return size;
+}
+EXPORT_SYMBOL_GPL(__trace_array_puts);
+
+/**
+ * __trace_puts - write a constant string into the trace buffer.
+ * @ip: The address of the caller
+ * @str: The constant string to write
+ * @size: The size of the string.
+ */
+int __trace_puts(unsigned long ip, const char *str, int size)
+{
+ return __trace_array_puts(printk_trace, ip, str, size);
+}
+EXPORT_SYMBOL_GPL(__trace_puts);
+
+/**
+ * __trace_bputs - write the pointer to a constant string into trace buffer
+ * @ip: The address of the caller
+ * @str: The constant string to write to the buffer to
+ */
+int __trace_bputs(unsigned long ip, const char *str)
+{
+ struct trace_array *tr = READ_ONCE(printk_trace);
+ struct ring_buffer_event *event;
+ struct trace_buffer *buffer;
+ struct bputs_entry *entry;
+ unsigned int trace_ctx;
+ int size = sizeof(struct bputs_entry);
+
+ if (!printk_binsafe(tr))
+ return __trace_puts(ip, str, strlen(str));
+
+ if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
+ return 0;
+
+ if (unlikely(tracing_selftest_running || tracing_disabled))
+ return 0;
+
+ trace_ctx = tracing_gen_ctx();
+ buffer = tr->array_buffer.buffer;
+
+ guard(ring_buffer_nest)(buffer);
+ event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
+ trace_ctx);
+ if (!event)
+ return 0;
+
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+ entry->str = str;
+
+ __buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(__trace_bputs);
+
+/* created for use with alloc_percpu */
+struct trace_buffer_struct {
+ int nesting;
+ char buffer[4][TRACE_BUF_SIZE];
+};
+
+static struct trace_buffer_struct __percpu *trace_percpu_buffer;
+
+/*
+ * This allows for lockless recording. If we're nested too deeply, then
+ * this returns NULL.
+ */
+static char *get_trace_buf(void)
+{
+ struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
+
+ if (!trace_percpu_buffer || buffer->nesting >= 4)
+ return NULL;
+
+ buffer->nesting++;
+
+ /* Interrupts must see nesting incremented before we use the buffer */
+ barrier();
+ return &buffer->buffer[buffer->nesting - 1][0];
+}
+
+static void put_trace_buf(void)
+{
+ /* Don't let the decrement of nesting leak before this */
+ barrier();
+ this_cpu_dec(trace_percpu_buffer->nesting);
+}
+
+static int alloc_percpu_trace_buffer(void)
+{
+ struct trace_buffer_struct __percpu *buffers;
+
+ if (trace_percpu_buffer)
+ return 0;
+
+ buffers = alloc_percpu(struct trace_buffer_struct);
+ if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
+ return -ENOMEM;
+
+ trace_percpu_buffer = buffers;
+ return 0;
+}
+
+static int buffers_allocated;
+
+void trace_printk_init_buffers(void)
+{
+ if (buffers_allocated)
+ return;
+
+ if (alloc_percpu_trace_buffer())
+ return;
+
+ /* trace_printk() is for debug use only. Don't use it in production. */
+
+ pr_warn("\n");
+ pr_warn("**********************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
+ pr_warn("** **\n");
+ pr_warn("** This means that this is a DEBUG kernel and it is **\n");
+ pr_warn("** unsafe for production use. **\n");
+ pr_warn("** **\n");
+ pr_warn("** If you see this message and you are not debugging **\n");
+ pr_warn("** the kernel, report this immediately to your vendor! **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("**********************************************************\n");
+
+ /* Expand the buffers to set size */
+ if (tracing_update_buffers(NULL) < 0)
+ pr_err("Failed to expand tracing buffers for trace_printk() calls\n");
+ else
+ buffers_allocated = 1;
+
+ /*
+ * trace_printk_init_buffers() can be called by modules.
+ * If that happens, then we need to start cmdline recording
+ * directly here.
+ */
+ if (system_state == SYSTEM_RUNNING)
+ tracing_start_cmdline_record();
+}
+EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
+
+void trace_printk_start_comm(void)
+{
+ /* Start tracing comms if trace printk is set */
+ if (!buffers_allocated)
+ return;
+ tracing_start_cmdline_record();
+}
+
+void trace_printk_start_stop_comm(int enabled)
+{
+ if (!buffers_allocated)
+ return;
+
+ if (enabled)
+ tracing_start_cmdline_record();
+ else
+ tracing_stop_cmdline_record();
+}
+
+/**
+ * trace_vbprintk - write binary msg to tracing buffer
+ * @ip: The address of the caller
+ * @fmt: The string format to write to the buffer
+ * @args: Arguments for @fmt
+ */
+int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
+{
+ struct ring_buffer_event *event;
+ struct trace_buffer *buffer;
+ struct trace_array *tr = READ_ONCE(printk_trace);
+ struct bprint_entry *entry;
+ unsigned int trace_ctx;
+ char *tbuffer;
+ int len = 0, size;
+
+ if (!printk_binsafe(tr))
+ return trace_vprintk(ip, fmt, args);
+
+ if (unlikely(tracing_selftest_running || tracing_disabled))
+ return 0;
+
+ /* Don't pollute graph traces with trace_vprintk internals */
+ pause_graph_tracing();
+
+ trace_ctx = tracing_gen_ctx();
+ guard(preempt_notrace)();
+
+ tbuffer = get_trace_buf();
+ if (!tbuffer) {
+ len = 0;
+ goto out_nobuffer;
+ }
+
+ len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
+
+ if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
+ goto out_put;
+
+ size = sizeof(*entry) + sizeof(u32) * len;
+ buffer = tr->array_buffer.buffer;
+ scoped_guard(ring_buffer_nest, buffer) {
+ event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+ trace_ctx);
+ if (!event)
+ goto out_put;
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+ entry->fmt = fmt;
+
+ memcpy(entry->buf, tbuffer, sizeof(u32) * len);
+ __buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
+ }
+out_put:
+ put_trace_buf();
+
+out_nobuffer:
+ unpause_graph_tracing();
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(trace_vbprintk);
+
+static __printf(3, 0)
+int __trace_array_vprintk(struct trace_buffer *buffer,
+ unsigned long ip, const char *fmt, va_list args)
+{
+ struct ring_buffer_event *event;
+ int len = 0, size;
+ struct print_entry *entry;
+ unsigned int trace_ctx;
+ char *tbuffer;
+
+ if (unlikely(tracing_disabled))
+ return 0;
+
+ /* Don't pollute graph traces with trace_vprintk internals */
+ pause_graph_tracing();
+
+ trace_ctx = tracing_gen_ctx();
+ guard(preempt_notrace)();
+
+
+ tbuffer = get_trace_buf();
+ if (!tbuffer) {
+ len = 0;
+ goto out_nobuffer;
+ }
+
+ len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
+
+ size = sizeof(*entry) + len + 1;
+ scoped_guard(ring_buffer_nest, buffer) {
+ event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+ trace_ctx);
+ if (!event)
+ goto out;
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+
+ memcpy(&entry->buf, tbuffer, len + 1);
+ __buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
+ }
+out:
+ put_trace_buf();
+
+out_nobuffer:
+ unpause_graph_tracing();
+
+ return len;
+}
+
+int trace_array_vprintk(struct trace_array *tr,
+ unsigned long ip, const char *fmt, va_list args)
+{
+ if (tracing_selftest_running && (tr->flags & TRACE_ARRAY_FL_GLOBAL))
+ return 0;
+
+ return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
+}
+
+/**
+ * trace_array_printk - Print a message to a specific instance
+ * @tr: The instance trace_array descriptor
+ * @ip: The instruction pointer that this is called from.
+ * @fmt: The format to print (printf format)
+ *
+ * If a subsystem sets up its own instance, they have the right to
+ * printk strings into their tracing instance buffer using this
+ * function. Note, this function will not write into the top level
+ * buffer (use trace_printk() for that), as writing into the top level
+ * buffer should only have events that can be individually disabled.
+ * trace_printk() is only used for debugging a kernel, and should not
+ * be ever incorporated in normal use.
+ *
+ * trace_array_printk() can be used, as it will not add noise to the
+ * top level tracing buffer.
+ *
+ * Note, trace_array_init_printk() must be called on @tr before this
+ * can be used.
+ */
+int trace_array_printk(struct trace_array *tr,
+ unsigned long ip, const char *fmt, ...)
+{
+ int ret;
+ va_list ap;
+
+ if (!tr)
+ return -ENOENT;
+
+ /* This is only allowed for created instances */
+ if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+ return 0;
+
+ if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
+ return 0;
+
+ va_start(ap, fmt);
+ ret = trace_array_vprintk(tr, ip, fmt, ap);
+ va_end(ap);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(trace_array_printk);
+
+/**
+ * trace_array_init_printk - Initialize buffers for trace_array_printk()
+ * @tr: The trace array to initialize the buffers for
+ *
+ * As trace_array_printk() only writes into instances, they are OK to
+ * have in the kernel (unlike trace_printk()). This needs to be called
+ * before trace_array_printk() can be used on a trace_array.
+ */
+int trace_array_init_printk(struct trace_array *tr)
+{
+ if (!tr)
+ return -ENOENT;
+
+ /* This is only allowed for created instances */
+ if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+ return -EINVAL;
+
+ return alloc_percpu_trace_buffer();
+}
+EXPORT_SYMBOL_GPL(trace_array_init_printk);
+
+int trace_array_printk_buf(struct trace_buffer *buffer,
+ unsigned long ip, const char *fmt, ...)
+{
+ int ret;
+ va_list ap;
+
+ if (!(printk_trace->trace_flags & TRACE_ITER(PRINTK)))
+ return 0;
+
+ va_start(ap, fmt);
+ ret = __trace_array_vprintk(buffer, ip, fmt, ap);
+ va_end(ap);
+ return ret;
+}
+
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+{
+ return trace_array_vprintk(printk_trace, ip, fmt, args);
+}
+EXPORT_SYMBOL_GPL(trace_vprintk);
+
static __init int init_trace_printk_function_export(void)
{
int ret;
--
2.51.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 10/10] tracing: Move pid filtering into trace_pid.c
2026-02-08 3:24 [PATCH v2 00/10] tracing: Clean up trace.c and move some code into other files Steven Rostedt
` (8 preceding siblings ...)
2026-02-08 3:24 ` [PATCH v2 09/10] tracing: Move trace_printk functions out of trace.c and into trace_printk.c Steven Rostedt
@ 2026-02-08 3:24 ` Steven Rostedt
9 siblings, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2026-02-08 3:24 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton
From: Steven Rostedt <rostedt@goodmis.org>
The trace.c file was a dumping ground for most tracing code. Start
organizing it better by moving various functions out into their own files.
Move the PID filtering functions from trace.c into its own trace_pid.c
file.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
kernel/trace/Makefile | 1 +
kernel/trace/trace.c | 242 --------------------------------------
kernel/trace/trace_pid.c | 246 +++++++++++++++++++++++++++++++++++++++
3 files changed, 247 insertions(+), 242 deletions(-)
create mode 100644 kernel/trace/trace_pid.c
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index fc5dcc888e13..04096c21d06b 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_TRACING) += trace_output.o
obj-$(CONFIG_TRACING) += trace_seq.o
obj-$(CONFIG_TRACING) += trace_stat.o
obj-$(CONFIG_TRACING) += trace_printk.o
+obj-$(CONFIG_TRACING) += trace_pid.o
obj-$(CONFIG_TRACING) += pid_list.o
obj-$(CONFIG_TRACING_MAP) += tracing_map.o
obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5812b830c1fa..551a452befa0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -637,248 +637,6 @@ int tracing_check_open_get_tr(struct trace_array *tr)
return 0;
}
-/**
- * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
- * @filtered_pids: The list of pids to check
- * @search_pid: The PID to find in @filtered_pids
- *
- * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
- */
-bool
-trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
-{
- return trace_pid_list_is_set(filtered_pids, search_pid);
-}
-
-/**
- * trace_ignore_this_task - should a task be ignored for tracing
- * @filtered_pids: The list of pids to check
- * @filtered_no_pids: The list of pids not to be traced
- * @task: The task that should be ignored if not filtered
- *
- * Checks if @task should be traced or not from @filtered_pids.
- * Returns true if @task should *NOT* be traced.
- * Returns false if @task should be traced.
- */
-bool
-trace_ignore_this_task(struct trace_pid_list *filtered_pids,
- struct trace_pid_list *filtered_no_pids,
- struct task_struct *task)
-{
- /*
- * If filtered_no_pids is not empty, and the task's pid is listed
- * in filtered_no_pids, then return true.
- * Otherwise, if filtered_pids is empty, that means we can
- * trace all tasks. If it has content, then only trace pids
- * within filtered_pids.
- */
-
- return (filtered_pids &&
- !trace_find_filtered_pid(filtered_pids, task->pid)) ||
- (filtered_no_pids &&
- trace_find_filtered_pid(filtered_no_pids, task->pid));
-}
-
-/**
- * trace_filter_add_remove_task - Add or remove a task from a pid_list
- * @pid_list: The list to modify
- * @self: The current task for fork or NULL for exit
- * @task: The task to add or remove
- *
- * If adding a task, if @self is defined, the task is only added if @self
- * is also included in @pid_list. This happens on fork and tasks should
- * only be added when the parent is listed. If @self is NULL, then the
- * @task pid will be removed from the list, which would happen on exit
- * of a task.
- */
-void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
- struct task_struct *self,
- struct task_struct *task)
-{
- if (!pid_list)
- return;
-
- /* For forks, we only add if the forking task is listed */
- if (self) {
- if (!trace_find_filtered_pid(pid_list, self->pid))
- return;
- }
-
- /* "self" is set for forks, and NULL for exits */
- if (self)
- trace_pid_list_set(pid_list, task->pid);
- else
- trace_pid_list_clear(pid_list, task->pid);
-}
-
-/**
- * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
- * @pid_list: The pid list to show
- * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
- * @pos: The position of the file
- *
- * This is used by the seq_file "next" operation to iterate the pids
- * listed in a trace_pid_list structure.
- *
- * Returns the pid+1 as we want to display pid of zero, but NULL would
- * stop the iteration.
- */
-void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
-{
- long pid = (unsigned long)v;
- unsigned int next;
-
- (*pos)++;
-
- /* pid already is +1 of the actual previous bit */
- if (trace_pid_list_next(pid_list, pid, &next) < 0)
- return NULL;
-
- pid = next;
-
- /* Return pid + 1 to allow zero to be represented */
- return (void *)(pid + 1);
-}
-
-/**
- * trace_pid_start - Used for seq_file to start reading pid lists
- * @pid_list: The pid list to show
- * @pos: The position of the file
- *
- * This is used by seq_file "start" operation to start the iteration
- * of listing pids.
- *
- * Returns the pid+1 as we want to display pid of zero, but NULL would
- * stop the iteration.
- */
-void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
-{
- unsigned long pid;
- unsigned int first;
- loff_t l = 0;
-
- if (trace_pid_list_first(pid_list, &first) < 0)
- return NULL;
-
- pid = first;
-
- /* Return pid + 1 so that zero can be the exit value */
- for (pid++; pid && l < *pos;
- pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
- ;
- return (void *)pid;
-}
-
-/**
- * trace_pid_show - show the current pid in seq_file processing
- * @m: The seq_file structure to write into
- * @v: A void pointer of the pid (+1) value to display
- *
- * Can be directly used by seq_file operations to display the current
- * pid value.
- */
-int trace_pid_show(struct seq_file *m, void *v)
-{
- unsigned long pid = (unsigned long)v - 1;
-
- seq_printf(m, "%lu\n", pid);
- return 0;
-}
-
-/* 128 should be much more than enough */
-#define PID_BUF_SIZE 127
-
-int trace_pid_write(struct trace_pid_list *filtered_pids,
- struct trace_pid_list **new_pid_list,
- const char __user *ubuf, size_t cnt)
-{
- struct trace_pid_list *pid_list;
- struct trace_parser parser;
- unsigned long val;
- int nr_pids = 0;
- ssize_t read = 0;
- ssize_t ret;
- loff_t pos;
- pid_t pid;
-
- if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
- return -ENOMEM;
-
- /*
- * Always recreate a new array. The write is an all or nothing
- * operation. Always create a new array when adding new pids by
- * the user. If the operation fails, then the current list is
- * not modified.
- */
- pid_list = trace_pid_list_alloc();
- if (!pid_list) {
- trace_parser_put(&parser);
- return -ENOMEM;
- }
-
- if (filtered_pids) {
- /* copy the current bits to the new max */
- ret = trace_pid_list_first(filtered_pids, &pid);
- while (!ret) {
- ret = trace_pid_list_set(pid_list, pid);
- if (ret < 0)
- goto out;
-
- ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
- nr_pids++;
- }
- }
-
- ret = 0;
- while (cnt > 0) {
-
- pos = 0;
-
- ret = trace_get_user(&parser, ubuf, cnt, &pos);
- if (ret < 0)
- break;
-
- read += ret;
- ubuf += ret;
- cnt -= ret;
-
- if (!trace_parser_loaded(&parser))
- break;
-
- ret = -EINVAL;
- if (kstrtoul(parser.buffer, 0, &val))
- break;
-
- pid = (pid_t)val;
-
- if (trace_pid_list_set(pid_list, pid) < 0) {
- ret = -1;
- break;
- }
- nr_pids++;
-
- trace_parser_clear(&parser);
- ret = 0;
- }
- out:
- trace_parser_put(&parser);
-
- if (ret < 0) {
- trace_pid_list_free(pid_list);
- return ret;
- }
-
- if (!nr_pids) {
- /* Cleared the list of pids */
- trace_pid_list_free(pid_list);
- pid_list = NULL;
- }
-
- *new_pid_list = pid_list;
-
- return read;
-}
-
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
u64 ts;
diff --git a/kernel/trace/trace_pid.c b/kernel/trace/trace_pid.c
new file mode 100644
index 000000000000..7127c8de4174
--- /dev/null
+++ b/kernel/trace/trace_pid.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "trace.h"
+
+/**
+ * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
+ * @filtered_pids: The list of pids to check
+ * @search_pid: The PID to find in @filtered_pids
+ *
+ * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
+ */
+bool
+trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
+{
+ return trace_pid_list_is_set(filtered_pids, search_pid);
+}
+
+/**
+ * trace_ignore_this_task - should a task be ignored for tracing
+ * @filtered_pids: The list of pids to check
+ * @filtered_no_pids: The list of pids not to be traced
+ * @task: The task that should be ignored if not filtered
+ *
+ * Checks if @task should be traced or not from @filtered_pids.
+ * Returns true if @task should *NOT* be traced.
+ * Returns false if @task should be traced.
+ */
+bool
+trace_ignore_this_task(struct trace_pid_list *filtered_pids,
+ struct trace_pid_list *filtered_no_pids,
+ struct task_struct *task)
+{
+ /*
+ * If filtered_no_pids is not empty, and the task's pid is listed
+ * in filtered_no_pids, then return true.
+ * Otherwise, if filtered_pids is empty, that means we can
+ * trace all tasks. If it has content, then only trace pids
+ * within filtered_pids.
+ */
+
+ return (filtered_pids &&
+ !trace_find_filtered_pid(filtered_pids, task->pid)) ||
+ (filtered_no_pids &&
+ trace_find_filtered_pid(filtered_no_pids, task->pid));
+}
+
+/**
+ * trace_filter_add_remove_task - Add or remove a task from a pid_list
+ * @pid_list: The list to modify
+ * @self: The current task for fork or NULL for exit
+ * @task: The task to add or remove
+ *
+ * If adding a task, if @self is defined, the task is only added if @self
+ * is also included in @pid_list. This happens on fork and tasks should
+ * only be added when the parent is listed. If @self is NULL, then the
+ * @task pid will be removed from the list, which would happen on exit
+ * of a task.
+ */
+void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
+ struct task_struct *self,
+ struct task_struct *task)
+{
+ if (!pid_list)
+ return;
+
+ /* For forks, we only add if the forking task is listed */
+ if (self) {
+ if (!trace_find_filtered_pid(pid_list, self->pid))
+ return;
+ }
+
+ /* "self" is set for forks, and NULL for exits */
+ if (self)
+ trace_pid_list_set(pid_list, task->pid);
+ else
+ trace_pid_list_clear(pid_list, task->pid);
+}
+
+/**
+ * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
+ * @pid_list: The pid list to show
+ * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
+ * @pos: The position of the file
+ *
+ * This is used by the seq_file "next" operation to iterate the pids
+ * listed in a trace_pid_list structure.
+ *
+ * Returns the pid+1 as we want to display pid of zero, but NULL would
+ * stop the iteration.
+ */
+void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
+{
+ long pid = (unsigned long)v;
+ unsigned int next;
+
+ (*pos)++;
+
+ /* pid already is +1 of the actual previous bit */
+ if (trace_pid_list_next(pid_list, pid, &next) < 0)
+ return NULL;
+
+ pid = next;
+
+ /* Return pid + 1 to allow zero to be represented */
+ return (void *)(pid + 1);
+}
+
+/**
+ * trace_pid_start - Used for seq_file to start reading pid lists
+ * @pid_list: The pid list to show
+ * @pos: The position of the file
+ *
+ * This is used by seq_file "start" operation to start the iteration
+ * of listing pids.
+ *
+ * Returns the pid+1 as we want to display pid of zero, but NULL would
+ * stop the iteration.
+ */
+void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
+{
+ unsigned long pid;
+ unsigned int first;
+ loff_t l = 0;
+
+ if (trace_pid_list_first(pid_list, &first) < 0)
+ return NULL;
+
+ pid = first;
+
+ /* Return pid + 1 so that zero can be the exit value */
+ for (pid++; pid && l < *pos;
+ pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
+ ;
+ return (void *)pid;
+}
+
+/**
+ * trace_pid_show - show the current pid in seq_file processing
+ * @m: The seq_file structure to write into
+ * @v: A void pointer of the pid (+1) value to display
+ *
+ * Can be directly used by seq_file operations to display the current
+ * pid value.
+ */
+int trace_pid_show(struct seq_file *m, void *v)
+{
+ unsigned long pid = (unsigned long)v - 1;
+
+ seq_printf(m, "%lu\n", pid);
+ return 0;
+}
+
+/* 128 should be much more than enough */
+#define PID_BUF_SIZE 127
+
+int trace_pid_write(struct trace_pid_list *filtered_pids,
+ struct trace_pid_list **new_pid_list,
+ const char __user *ubuf, size_t cnt)
+{
+ struct trace_pid_list *pid_list;
+ struct trace_parser parser;
+ unsigned long val;
+ int nr_pids = 0;
+ ssize_t read = 0;
+ ssize_t ret;
+ loff_t pos;
+ pid_t pid;
+
+ if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
+ return -ENOMEM;
+
+ /*
+ * Always recreate a new array. The write is an all or nothing
+ * operation. Always create a new array when adding new pids by
+ * the user. If the operation fails, then the current list is
+ * not modified.
+ */
+ pid_list = trace_pid_list_alloc();
+ if (!pid_list) {
+ trace_parser_put(&parser);
+ return -ENOMEM;
+ }
+
+ if (filtered_pids) {
+ /* copy the current bits to the new max */
+ ret = trace_pid_list_first(filtered_pids, &pid);
+ while (!ret) {
+ ret = trace_pid_list_set(pid_list, pid);
+ if (ret < 0)
+ goto out;
+
+ ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
+ nr_pids++;
+ }
+ }
+
+ ret = 0;
+ while (cnt > 0) {
+
+ pos = 0;
+
+ ret = trace_get_user(&parser, ubuf, cnt, &pos);
+ if (ret < 0)
+ break;
+
+ read += ret;
+ ubuf += ret;
+ cnt -= ret;
+
+ if (!trace_parser_loaded(&parser))
+ break;
+
+ ret = -EINVAL;
+ if (kstrtoul(parser.buffer, 0, &val))
+ break;
+
+ pid = (pid_t)val;
+
+ if (trace_pid_list_set(pid_list, pid) < 0) {
+ ret = -1;
+ break;
+ }
+ nr_pids++;
+
+ trace_parser_clear(&parser);
+ ret = 0;
+ }
+ out:
+ trace_parser_put(&parser);
+
+ if (ret < 0) {
+ trace_pid_list_free(pid_list);
+ return ret;
+ }
+
+ if (!nr_pids) {
+ /* Cleared the list of pids */
+ trace_pid_list_free(pid_list);
+ pid_list = NULL;
+ }
+
+ *new_pid_list = pid_list;
+
+ return read;
+}
+
--
2.51.0
^ permalink raw reply related [flat|nested] 11+ messages in thread