* [RFC][PATCH 01/13] ftrace: Replace FTRACE_FL_NOTRACE flag with a hash of ignored functions
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 02/13] ftrace: Use hash instead for FTRACE_FL_FILTER Steven Rostedt
` (12 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
[-- Attachment #1: 0001-ftrace-Replace-FTRACE_FL_NOTRACE-flag-with-a-hash-of.patch --]
[-- Type: text/plain, Size: 7801 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
To prepare for the accounting system that will allow multiple users of
the function tracer, keeping FTRACE_FL_NOTRACE as a flag in the
dyn_ftrace record no longer makes sense.
All ftrace_ops will soon have a hash of functions they should trace
and not trace. Moving the set of functions not to trace into a global
hash first eases the transition.
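The net effect on the enable test in __ftrace_replace_code(), pulled
out of the hunks below for easier comparison:

	/* before: per-record flag test */
	if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
		if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
			flag = FTRACE_FL_ENABLED;
	}

	/* after: lookup in the global notrace hash */
	if (enable && !ftrace_lookup_ip(&notrace_hash, rec->ip)) {
		if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
			flag = FTRACE_FL_ENABLED;
	}

The filter side still uses the FTRACE_FL_FILTER flag here; it is
converted to a hash in the next patch.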
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
include/linux/ftrace.h | 1 -
kernel/trace/ftrace.c | 176 +++++++++++++++++++++++++++++++++++++++++-------
2 files changed, 150 insertions(+), 27 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 3204744..fe0a90a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -149,7 +149,6 @@ enum {
FTRACE_FL_FREE = (1 << 0),
FTRACE_FL_FILTER = (1 << 1),
FTRACE_FL_ENABLED = (1 << 2),
- FTRACE_FL_NOTRACE = (1 << 3),
};
struct dyn_ftrace {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d340634..04c002a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -57,6 +57,7 @@
/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
+#define FTRACE_HASH_MAX_BITS 10
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
@@ -865,6 +866,22 @@ enum {
FTRACE_START_FUNC_RET = (1 << 3),
FTRACE_STOP_FUNC_RET = (1 << 4),
};
+struct ftrace_func_entry {
+ struct hlist_node hlist;
+ unsigned long ip;
+};
+
+struct ftrace_hash {
+ unsigned long size_bits;
+ struct hlist_head *buckets;
+ unsigned long count;
+};
+
+static struct hlist_head notrace_buckets[1 << FTRACE_HASH_MAX_BITS];
+static struct ftrace_hash notrace_hash = {
+ .size_bits = FTRACE_HASH_MAX_BITS,
+ .buckets = notrace_buckets,
+};
static int ftrace_filtered;
@@ -889,6 +906,79 @@ static struct ftrace_page *ftrace_pages;
static struct dyn_ftrace *ftrace_free_records;
+static struct ftrace_func_entry *
+ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
+{
+ unsigned long key;
+ struct ftrace_func_entry *entry;
+ struct hlist_head *hhd;
+ struct hlist_node *n;
+
+ if (!hash->count)
+ return NULL;
+
+ if (hash->size_bits > 0)
+ key = hash_long(ip, hash->size_bits);
+ else
+ key = 0;
+
+ hhd = &hash->buckets[key];
+
+ hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
+ if (entry->ip == ip)
+ return entry;
+ }
+ return NULL;
+}
+
+static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
+{
+ struct ftrace_func_entry *entry;
+ struct hlist_head *hhd;
+ unsigned long key;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ if (hash->size_bits)
+ key = hash_long(ip, hash->size_bits);
+ else
+ key = 0;
+
+ entry->ip = ip;
+ hhd = &hash->buckets[key];
+ hlist_add_head(&entry->hlist, hhd);
+ hash->count++;
+
+ return 0;
+}
+
+static void
+remove_hash_entry(struct ftrace_hash *hash,
+ struct ftrace_func_entry *entry)
+{
+ hlist_del(&entry->hlist);
+ kfree(entry);
+ hash->count--;
+}
+
+static void ftrace_hash_clear(struct ftrace_hash *hash)
+{
+ struct hlist_head *hhd;
+ struct hlist_node *tp, *tn;
+ struct ftrace_func_entry *entry;
+ int size = 1 << hash->size_bits;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ hhd = &hash->buckets[i];
+ hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
+ remove_hash_entry(hash, entry);
+ }
+ FTRACE_WARN_ON(hash->count);
+}
+
/*
* This is a double for. Do not use 'break' to break out of the loop,
* you must use a goto.
@@ -1032,7 +1122,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
* If we want to enable it and filtering is on, enable it only if
* it's filtered
*/
- if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
+ if (enable && !ftrace_lookup_ip(&notrace_hash, rec->ip)) {
if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
flag = FTRACE_FL_ENABLED;
}
@@ -1465,7 +1555,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
!(rec->flags & FTRACE_FL_FILTER)) ||
((iter->flags & FTRACE_ITER_NOTRACE) &&
- !(rec->flags & FTRACE_FL_NOTRACE))) {
+ !ftrace_lookup_ip(&notrace_hash, rec->ip))) {
rec = NULL;
goto retry;
}
@@ -1609,14 +1699,15 @@ static void ftrace_filter_reset(int enable)
{
struct ftrace_page *pg;
struct dyn_ftrace *rec;
- unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
mutex_lock(&ftrace_lock);
- if (enable)
+ if (enable) {
ftrace_filtered = 0;
- do_for_each_ftrace_rec(pg, rec) {
- rec->flags &= ~type;
- } while_for_each_ftrace_rec();
+ do_for_each_ftrace_rec(pg, rec) {
+ rec->flags &= ~FTRACE_FL_FILTER;
+ } while_for_each_ftrace_rec();
+ } else
+ ftrace_hash_clear(&notrace_hash);
mutex_unlock(&ftrace_lock);
}
@@ -1716,13 +1807,36 @@ static int ftrace_match(char *str, char *regex, int len, int type)
return matched;
}
-static void
-update_record(struct dyn_ftrace *rec, unsigned long flag, int not)
+static int
+update_record(struct dyn_ftrace *rec, int enable, int not)
{
- if (not)
- rec->flags &= ~flag;
- else
- rec->flags |= flag;
+ struct ftrace_func_entry *entry;
+ struct ftrace_hash *hash = &notrace_hash;
+ int ret = 0;
+
+ if (enable) {
+ if (not)
+ rec->flags &= ~FTRACE_FL_FILTER;
+ else
+ rec->flags |= FTRACE_FL_FILTER;
+ } else {
+ if (not) {
+ /* Do nothing if it doesn't exist */
+ entry = ftrace_lookup_ip(hash, rec->ip);
+ if (!entry)
+ return 0;
+
+ remove_hash_entry(hash, entry);
+ } else {
+ /* Do nothing if it exists */
+ entry = ftrace_lookup_ip(hash, rec->ip);
+ if (entry)
+ return 0;
+
+ ret = add_hash_entry(hash, rec->ip);
+ }
+ }
+ return ret;
}
static int
@@ -1754,16 +1868,14 @@ static int match_records(char *buff, int len, char *mod, int enable, int not)
struct dyn_ftrace *rec;
int type = MATCH_FULL;
char *search = buff;
- unsigned long flag;
int found = 0;
+ int ret;
if (len) {
type = filter_parse_regex(buff, len, &search, &not);
search_len = strlen(search);
}
- flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
-
mutex_lock(&ftrace_lock);
if (unlikely(ftrace_disabled))
@@ -1772,7 +1884,11 @@ static int match_records(char *buff, int len, char *mod, int enable, int not)
do_for_each_ftrace_rec(pg, rec) {
if (ftrace_match_record(rec, mod, search, search_len, type)) {
- update_record(rec, flag, not);
+ ret = update_record(rec, enable, not);
+ if (ret < 0) {
+ found = ret;
+ goto out_unlock;
+ }
found = 1;
}
/*
@@ -1821,6 +1937,7 @@ static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
char *mod;
+ int ret = -EINVAL;
/*
* cmd == 'mod' because we only registered this func
@@ -1832,15 +1949,19 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
/* we must have a module name */
if (!param)
- return -EINVAL;
+ return ret;
mod = strsep(&param, ":");
if (!strlen(mod))
- return -EINVAL;
+ return ret;
- if (ftrace_match_module_records(func, mod, enable))
- return 0;
- return -EINVAL;
+ ret = ftrace_match_module_records(func, mod, enable);
+ if (!ret)
+ ret = -EINVAL;
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
static struct ftrace_func_command ftrace_mod_cmd = {
@@ -2132,14 +2253,17 @@ static int ftrace_process_regex(char *buff, int len, int enable)
{
char *func, *command, *next = buff;
struct ftrace_func_command *p;
- int ret = -EINVAL;
+ int ret;
func = strsep(&next, ":");
if (!next) {
- if (ftrace_match_records(func, len, enable))
- return 0;
- return ret;
+ ret = ftrace_match_records(func, len, enable);
+ if (!ret)
+ ret = -EINVAL;
+ if (ret < 0)
+ return ret;
+ return 0;
}
/* command found */
--
1.7.2.3
* [RFC][PATCH 02/13] ftrace: Use hash instead for FTRACE_FL_FILTER
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 01/13] ftrace: Replace FTRACE_FL_NOTRACE flag with a hash of ignored functions Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 03/13] ftrace: Create a global_ops to hold the filter and notrace hashes Steven Rostedt
` (11 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
[-- Attachment #1: 0002-ftrace-Use-hash-instead-for-FTRACE_FL_FILTER.patch --]
[-- Type: text/plain, Size: 12304 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
When multiple users are allowed to have their own set of functions
to trace, the FTRACE_FL_FILTER flag will not be enough to handle
the accounting of those users; each user will need its own set of
functions.
Replace FTRACE_FL_FILTER with a filter_hash. This is temporary
until the rest of the function filtering accounting lands.
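As with the notrace conversion, the visible change is in the enable
test: an empty filter hash now plays the role of the old
ftrace_filtered == 0 case. Pulled out of the hunks below:

	/* before: global flag plus per-record bit */
	if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
		flag = FTRACE_FL_ENABLED;

	/* after: an empty filter hash means "trace everything" */
	if (!filter_hash.count || ftrace_lookup_ip(&filter_hash, rec->ip))
		flag = FTRACE_FL_ENABLED;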
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
include/linux/ftrace.h | 3 +-
kernel/trace/ftrace.c | 151 ++++++++++++++++++++++--------------------------
2 files changed, 70 insertions(+), 84 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index fe0a90a..52fc5d4 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -147,8 +147,7 @@ extern int ftrace_text_reserved(void *start, void *end);
enum {
FTRACE_FL_FREE = (1 << 0),
- FTRACE_FL_FILTER = (1 << 1),
- FTRACE_FL_ENABLED = (1 << 2),
+ FTRACE_FL_ENABLED = (1 << 1),
};
struct dyn_ftrace {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 04c002a..222eca4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -883,7 +883,11 @@ static struct ftrace_hash notrace_hash = {
.buckets = notrace_buckets,
};
-static int ftrace_filtered;
+static struct hlist_head filter_buckets[1 << FTRACE_HASH_MAX_BITS];
+static struct ftrace_hash filter_hash = {
+ .size_bits = FTRACE_HASH_MAX_BITS,
+ .buckets = filter_buckets,
+};
static struct dyn_ftrace *ftrace_new_addrs;
@@ -1123,7 +1127,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
* it's filtered
*/
if (enable && !ftrace_lookup_ip(&notrace_hash, rec->ip)) {
- if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
+ if (!filter_hash.count || ftrace_lookup_ip(&filter_hash, rec->ip))
flag = FTRACE_FL_ENABLED;
}
@@ -1430,6 +1434,7 @@ struct ftrace_iterator {
struct dyn_ftrace *func;
struct ftrace_func_probe *probe;
struct trace_parser parser;
+ struct ftrace_hash *hash;
int hidx;
int idx;
unsigned flags;
@@ -1552,7 +1557,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
if ((rec->flags & FTRACE_FL_FREE) ||
((iter->flags & FTRACE_ITER_FILTER) &&
- !(rec->flags & FTRACE_FL_FILTER)) ||
+ !(ftrace_lookup_ip(&filter_hash, rec->ip))) ||
((iter->flags & FTRACE_ITER_NOTRACE) &&
!ftrace_lookup_ip(&notrace_hash, rec->ip))) {
@@ -1598,7 +1603,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
* off, we can short cut and just print out that all
* functions are enabled.
*/
- if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
+ if (iter->flags & FTRACE_ITER_FILTER && !filter_hash.count) {
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
@@ -1695,24 +1700,16 @@ ftrace_avail_open(struct inode *inode, struct file *file)
return ret;
}
-static void ftrace_filter_reset(int enable)
+static void ftrace_filter_reset(struct ftrace_hash *hash)
{
- struct ftrace_page *pg;
- struct dyn_ftrace *rec;
-
mutex_lock(&ftrace_lock);
- if (enable) {
- ftrace_filtered = 0;
- do_for_each_ftrace_rec(pg, rec) {
- rec->flags &= ~FTRACE_FL_FILTER;
- } while_for_each_ftrace_rec();
- } else
- ftrace_hash_clear(&notrace_hash);
+ ftrace_hash_clear(hash);
mutex_unlock(&ftrace_lock);
}
static int
-ftrace_regex_open(struct inode *inode, struct file *file, int enable)
+ftrace_regex_open(struct ftrace_hash *hash, int flag,
+ struct inode *inode, struct file *file)
{
struct ftrace_iterator *iter;
int ret = 0;
@@ -1729,15 +1726,16 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
return -ENOMEM;
}
+ iter->hash = hash;
+
mutex_lock(&ftrace_regex_lock);
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
- ftrace_filter_reset(enable);
+ ftrace_filter_reset(hash);
if (file->f_mode & FMODE_READ) {
iter->pg = ftrace_pages_start;
- iter->flags = enable ? FTRACE_ITER_FILTER :
- FTRACE_ITER_NOTRACE;
+ iter->flags = flag;
ret = seq_open(file, &show_ftrace_seq_ops);
if (!ret) {
@@ -1757,13 +1755,15 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
- return ftrace_regex_open(inode, file, 1);
+ return ftrace_regex_open(&filter_hash, FTRACE_ITER_FILTER,
+ inode, file);
}
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
- return ftrace_regex_open(inode, file, 0);
+ return ftrace_regex_open(&notrace_hash, FTRACE_ITER_NOTRACE,
+ inode, file);
}
static loff_t
@@ -1808,33 +1808,24 @@ static int ftrace_match(char *str, char *regex, int len, int type)
}
static int
-update_record(struct dyn_ftrace *rec, int enable, int not)
+enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
{
struct ftrace_func_entry *entry;
- struct ftrace_hash *hash = &notrace_hash;
int ret = 0;
- if (enable) {
- if (not)
- rec->flags &= ~FTRACE_FL_FILTER;
- else
- rec->flags |= FTRACE_FL_FILTER;
- } else {
- if (not) {
- /* Do nothing if it doesn't exist */
- entry = ftrace_lookup_ip(hash, rec->ip);
- if (!entry)
- return 0;
+ entry = ftrace_lookup_ip(hash, rec->ip);
+ if (not) {
+ /* Do nothing if it doesn't exist */
+ if (!entry)
+ return 0;
- remove_hash_entry(hash, entry);
- } else {
- /* Do nothing if it exists */
- entry = ftrace_lookup_ip(hash, rec->ip);
- if (entry)
- return 0;
+ remove_hash_entry(hash, entry);
+ } else {
+ /* Do nothing if it exists */
+ if (entry)
+ return 0;
- ret = add_hash_entry(hash, rec->ip);
- }
+ ret = add_hash_entry(hash, rec->ip);
}
return ret;
}
@@ -1861,7 +1852,9 @@ ftrace_match_record(struct dyn_ftrace *rec, char *mod,
return ftrace_match(str, regex, len, type);
}
-static int match_records(char *buff, int len, char *mod, int enable, int not)
+static int
+match_records(struct ftrace_hash *hash, char *buff,
+ int len, char *mod, int not)
{
unsigned search_len = 0;
struct ftrace_page *pg;
@@ -1884,20 +1877,13 @@ static int match_records(char *buff, int len, char *mod, int enable, int not)
do_for_each_ftrace_rec(pg, rec) {
if (ftrace_match_record(rec, mod, search, search_len, type)) {
- ret = update_record(rec, enable, not);
+ ret = enter_record(hash, rec, not);
if (ret < 0) {
found = ret;
goto out_unlock;
}
found = 1;
}
- /*
- * Only enable filtering if we have a function that
- * is filtered on.
- */
- if (enable && (rec->flags & FTRACE_FL_FILTER))
- ftrace_filtered = 1;
-
} while_for_each_ftrace_rec();
out_unlock:
mutex_unlock(&ftrace_lock);
@@ -1906,12 +1892,13 @@ static int match_records(char *buff, int len, char *mod, int enable, int not)
}
static int
-ftrace_match_records(char *buff, int len, int enable)
+ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
{
- return match_records(buff, len, NULL, enable, 0);
+ return match_records(hash, buff, len, NULL, 0);
}
-static int ftrace_match_module_records(char *buff, char *mod, int enable)
+static int
+ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
{
int not = 0;
@@ -1925,7 +1912,7 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable)
not = 1;
}
- return match_records(buff, strlen(buff), mod, enable, not);
+ return match_records(hash, buff, strlen(buff), mod, not);
}
/*
@@ -1936,6 +1923,7 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable)
static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
+ struct ftrace_hash *hash;
char *mod;
int ret = -EINVAL;
@@ -1955,7 +1943,12 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
if (!strlen(mod))
return ret;
- ret = ftrace_match_module_records(func, mod, enable);
+ if (enable)
+ hash = &filter_hash;
+ else
+ hash = &notrace_hash;
+
+ ret = ftrace_match_module_records(hash, func, mod);
if (!ret)
ret = -EINVAL;
if (ret < 0)
@@ -2253,12 +2246,18 @@ static int ftrace_process_regex(char *buff, int len, int enable)
{
char *func, *command, *next = buff;
struct ftrace_func_command *p;
+ struct ftrace_hash *hash;
int ret;
+ if (enable)
+ hash = &filter_hash;
+ else
+ hash = &notrace_hash;
+
func = strsep(&next, ":");
if (!next) {
- ret = ftrace_match_records(func, len, enable);
+ ret = ftrace_match_records(hash, func, len);
if (!ret)
ret = -EINVAL;
if (ret < 0)
@@ -2340,16 +2339,16 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf,
}
static void
-ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
+ftrace_set_regex(struct ftrace_hash *hash, unsigned char *buf, int len, int reset)
{
if (unlikely(ftrace_disabled))
return;
mutex_lock(&ftrace_regex_lock);
if (reset)
- ftrace_filter_reset(enable);
+ ftrace_filter_reset(hash);
if (buf)
- ftrace_match_records(buf, len, enable);
+ ftrace_match_records(hash, buf, len);
mutex_unlock(&ftrace_regex_lock);
}
@@ -2364,7 +2363,7 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
*/
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
- ftrace_set_regex(buf, len, reset, 1);
+ ftrace_set_regex(&filter_hash, buf, len, reset);
}
/**
@@ -2379,7 +2378,7 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset)
*/
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
- ftrace_set_regex(buf, len, reset, 0);
+ ftrace_set_regex(&notrace_hash, buf, len, reset);
}
/*
@@ -2431,22 +2430,22 @@ static void __init set_ftrace_early_graph(char *buf)
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-static void __init set_ftrace_early_filter(char *buf, int enable)
+static void __init set_ftrace_early_filter(struct ftrace_hash *hash, char *buf)
{
char *func;
while (buf) {
func = strsep(&buf, ",");
- ftrace_set_regex(func, strlen(func), 0, enable);
+ ftrace_set_regex(hash, func, strlen(func), 0);
}
}
static void __init set_ftrace_early_filters(void)
{
if (ftrace_filter_buf[0])
- set_ftrace_early_filter(ftrace_filter_buf, 1);
+ set_ftrace_early_filter(&filter_hash, ftrace_filter_buf);
if (ftrace_notrace_buf[0])
- set_ftrace_early_filter(ftrace_notrace_buf, 0);
+ set_ftrace_early_filter(&notrace_hash, ftrace_notrace_buf);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (ftrace_graph_buf[0])
set_ftrace_early_graph(ftrace_graph_buf);
@@ -2454,7 +2453,7 @@ static void __init set_ftrace_early_filters(void)
}
static int
-ftrace_regex_release(struct inode *inode, struct file *file, int enable)
+ftrace_regex_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
struct ftrace_iterator *iter;
@@ -2471,7 +2470,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
parser = &iter->parser;
if (trace_parser_loaded(parser)) {
parser->buffer[parser->idx] = 0;
- ftrace_match_records(parser->buffer, parser->idx, enable);
+ ftrace_match_records(iter->hash, parser->buffer, parser->idx);
}
trace_parser_put(parser);
@@ -2488,18 +2487,6 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
return 0;
}
-static int
-ftrace_filter_release(struct inode *inode, struct file *file)
-{
- return ftrace_regex_release(inode, file, 1);
-}
-
-static int
-ftrace_notrace_release(struct inode *inode, struct file *file)
-{
- return ftrace_regex_release(inode, file, 0);
-}
-
static const struct file_operations ftrace_avail_fops = {
.open = ftrace_avail_open,
.read = seq_read,
@@ -2512,7 +2499,7 @@ static const struct file_operations ftrace_filter_fops = {
.read = seq_read,
.write = ftrace_filter_write,
.llseek = ftrace_regex_lseek,
- .release = ftrace_filter_release,
+ .release = ftrace_regex_release,
};
static const struct file_operations ftrace_notrace_fops = {
@@ -2520,7 +2507,7 @@ static const struct file_operations ftrace_notrace_fops = {
.read = seq_read,
.write = ftrace_notrace_write,
.llseek = ftrace_regex_lseek,
- .release = ftrace_notrace_release,
+ .release = ftrace_regex_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
--
1.7.2.3
* [RFC][PATCH 03/13] ftrace: Create a global_ops to hold the filter and notrace hashes
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 01/13] ftrace: Replace FTRACE_FL_NOTRACE flag with a hash of ignored functions Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 02/13] ftrace: Use hash instead for FTRACE_FL_FILTER Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 04/13] ftrace: Separate hash allocation and assignment Steven Rostedt
` (10 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
[-- Attachment #1: 0003-ftrace-Create-a-global_ops-to-hold-the-filter-and-no.patch --]
[-- Type: text/plain, Size: 8100 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
Combine the filter and notrace hashes under a single entity,
the global_ops. The global_ops is an ftrace_ops structure that is
passed to the various functions that read or modify the filtering of
the function tracer.
The ftrace_ops structure was modified to hold filter and notrace
hashes so that later patches may give each ftrace_ops its own
set of rules for which functions may be filtered.
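Assembled from the hunks below, the resulting layout is:

	struct ftrace_ops {
		ftrace_func_t		func;
		struct ftrace_ops	*next;
	#ifdef CONFIG_DYNAMIC_FTRACE
		struct ftrace_hash	*notrace_hash;
		struct ftrace_hash	*filter_hash;
	#endif
	};

	struct ftrace_ops global_ops = {
		.func         = ftrace_stub,
		.notrace_hash = &notrace_hash,
		.filter_hash  = &filter_hash,
	};

For now global_ops is the only user; the hash pointers exist so that
other ftrace_ops can later carry their own filters.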
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
include/linux/ftrace.h | 10 ++++++-
kernel/trace/ftrace.c | 65 ++++++++++++++++++++++++++++++++++--------------
2 files changed, 54 insertions(+), 21 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 52fc5d4..6658a51 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -29,9 +29,15 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+struct ftrace_hash;
+
struct ftrace_ops {
- ftrace_func_t func;
- struct ftrace_ops *next;
+ ftrace_func_t func;
+ struct ftrace_ops *next;
+#ifdef CONFIG_DYNAMIC_FTRACE
+ struct ftrace_hash *notrace_hash;
+ struct ftrace_hash *filter_hash;
+#endif
};
extern int function_trace_stop;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 222eca4..a517a6c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -889,6 +889,12 @@ static struct ftrace_hash filter_hash = {
.buckets = filter_buckets,
};
+struct ftrace_ops global_ops = {
+ .func = ftrace_stub,
+ .notrace_hash = &notrace_hash,
+ .filter_hash = &filter_hash,
+};
+
static struct dyn_ftrace *ftrace_new_addrs;
static DEFINE_MUTEX(ftrace_regex_lock);
@@ -1112,6 +1118,7 @@ int ftrace_text_reserved(void *start, void *end)
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
+ struct ftrace_ops *ops = &global_ops;
unsigned long ftrace_addr;
unsigned long flag = 0UL;
@@ -1126,8 +1133,9 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
* If we want to enable it and filtering is on, enable it only if
* it's filtered
*/
- if (enable && !ftrace_lookup_ip(&notrace_hash, rec->ip)) {
- if (!filter_hash.count || ftrace_lookup_ip(&filter_hash, rec->ip))
+ if (enable && !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) {
+ if (!ops->filter_hash->count ||
+ ftrace_lookup_ip(ops->filter_hash, rec->ip))
flag = FTRACE_FL_ENABLED;
}
@@ -1531,6 +1539,7 @@ static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
+ struct ftrace_ops *ops = &global_ops;
struct dyn_ftrace *rec = NULL;
if (unlikely(ftrace_disabled))
@@ -1557,10 +1566,10 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
if ((rec->flags & FTRACE_FL_FREE) ||
((iter->flags & FTRACE_ITER_FILTER) &&
- !(ftrace_lookup_ip(&filter_hash, rec->ip))) ||
+ !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
((iter->flags & FTRACE_ITER_NOTRACE) &&
- !ftrace_lookup_ip(&notrace_hash, rec->ip))) {
+ !ftrace_lookup_ip(ops->notrace_hash, rec->ip))) {
rec = NULL;
goto retry;
}
@@ -1584,6 +1593,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
static void *t_start(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
+ struct ftrace_ops *ops = &global_ops;
void *p = NULL;
loff_t l;
@@ -1603,7 +1613,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
* off, we can short cut and just print out that all
* functions are enabled.
*/
- if (iter->flags & FTRACE_ITER_FILTER && !filter_hash.count) {
+ if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
@@ -1708,10 +1718,11 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
}
static int
-ftrace_regex_open(struct ftrace_hash *hash, int flag,
+ftrace_regex_open(struct ftrace_ops *ops, int flag,
struct inode *inode, struct file *file)
{
struct ftrace_iterator *iter;
+ struct ftrace_hash *hash;
int ret = 0;
if (unlikely(ftrace_disabled))
@@ -1726,6 +1737,11 @@ ftrace_regex_open(struct ftrace_hash *hash, int flag,
return -ENOMEM;
}
+ if (flag & FTRACE_ITER_NOTRACE)
+ hash = ops->notrace_hash;
+ else
+ hash = ops->filter_hash;
+
iter->hash = hash;
mutex_lock(&ftrace_regex_lock);
@@ -1755,14 +1771,14 @@ ftrace_regex_open(struct ftrace_hash *hash, int flag,
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
- return ftrace_regex_open(&filter_hash, FTRACE_ITER_FILTER,
+ return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
inode, file);
}
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
- return ftrace_regex_open(¬race_hash, FTRACE_ITER_NOTRACE,
+ return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
inode, file);
}
@@ -1923,6 +1939,7 @@ ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
+ struct ftrace_ops *ops = &global_ops;
struct ftrace_hash *hash;
char *mod;
int ret = -EINVAL;
@@ -1944,9 +1961,9 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
return ret;
if (enable)
- hash = &filter_hash;
+ hash = ops->filter_hash;
else
- hash = &notrace_hash;
+ hash = ops->notrace_hash;
ret = ftrace_match_module_records(hash, func, mod);
if (!ret)
@@ -2245,14 +2262,15 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd)
static int ftrace_process_regex(char *buff, int len, int enable)
{
char *func, *command, *next = buff;
+ struct ftrace_ops *ops = &global_ops;
struct ftrace_func_command *p;
struct ftrace_hash *hash;
int ret;
if (enable)
- hash = &filter_hash;
+ hash = ops->filter_hash;
else
- hash = &notrace_hash;
+ hash = ops->notrace_hash;
func = strsep(&next, ":");
@@ -2339,11 +2357,19 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf,
}
static void
-ftrace_set_regex(struct ftrace_hash *hash, unsigned char *buf, int len, int reset)
+ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
+ int reset, int enable)
{
+ struct ftrace_hash *hash;
+
if (unlikely(ftrace_disabled))
return;
+ if (enable)
+ hash = ops->filter_hash;
+ else
+ hash = ops->notrace_hash;
+
mutex_lock(&ftrace_regex_lock);
if (reset)
ftrace_filter_reset(hash);
@@ -2363,7 +2389,7 @@ ftrace_set_regex(struct ftrace_hash *hash, unsigned char *buf, int len, int rese
*/
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
- ftrace_set_regex(&filter_hash, buf, len, reset);
+ ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
/**
@@ -2378,7 +2404,7 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset)
*/
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
- ftrace_set_regex(&notrace_hash, buf, len, reset);
+ ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
/*
@@ -2430,22 +2456,23 @@ static void __init set_ftrace_early_graph(char *buf)
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-static void __init set_ftrace_early_filter(struct ftrace_hash *hash, char *buf)
+static void __init
+set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
{
char *func;
while (buf) {
func = strsep(&buf, ",");
- ftrace_set_regex(hash, func, strlen(func), 0);
+ ftrace_set_regex(ops, func, strlen(func), 0, enable);
}
}
static void __init set_ftrace_early_filters(void)
{
if (ftrace_filter_buf[0])
- set_ftrace_early_filter(&filter_hash, ftrace_filter_buf);
+ set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
if (ftrace_notrace_buf[0])
- set_ftrace_early_filter(&notrace_hash, ftrace_notrace_buf);
+ set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (ftrace_graph_buf[0])
set_ftrace_early_graph(ftrace_graph_buf);
--
1.7.2.3
* [RFC][PATCH 04/13] ftrace: Separate hash allocation and assignment
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
` (2 preceding siblings ...)
2011-05-06 15:26 ` [RFC][PATCH 03/13] ftrace: Create a global_ops to hold the filter and notrace hashes Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 05/13] ftrace: Use counters to enable functions to trace Steven Rostedt
` (9 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
[-- Attachment #1: 0004-ftrace-Separate-hash-allocation-and-assignment.patch --]
[-- Type: text/plain, Size: 11551 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
When filtering, allocate a temporary hash and insert the matching
function records into it. After the filtering is complete, assign it
to the ftrace_ops structure.
This allows the ftrace_ops structure to keep a much smaller array of
hash buckets instead of wasting a lot of memory on a fixed-size table.
A read-only empty_hash is created as the minimum-size hash that any
ftrace_ops can point to.
Creating a new hash takes the following steps (a condensed sketch
follows the list):
o Allocate a default-sized hash.
o Walk the function records, adding the filtered records to the hash.
o Allocate a new hash with appropriately sized buckets.
o Move the entries from the default hash to the new hash.
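Condensed from ftrace_set_regex() in the patch below: filter into a
scratch hash, then publish it into the ops under ftrace_lock:

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
					  *orig_hash);
	if (!hash)
		return -ENOMEM;

	ftrace_match_records(hash, buf, len);	/* fill the scratch hash */

	mutex_lock(&ftrace_lock);
	ret = ftrace_hash_move(orig_hash, hash);	/* resize and publish */
	mutex_unlock(&ftrace_lock);

	free_ftrace_hash(hash);

ftrace_hash_move() sizes the final bucket array to about half the
number of entries found, capped at FTRACE_HASH_MAX_BITS.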
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/ftrace.c | 275 +++++++++++++++++++++++++++++++++++++++++--------
1 files changed, 233 insertions(+), 42 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a517a6c..46f0826 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -57,7 +57,8 @@
/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
-#define FTRACE_HASH_MAX_BITS 10
+#define FTRACE_HASH_DEFAULT_BITS 10
+#define FTRACE_HASH_MAX_BITS 12
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
@@ -877,22 +878,22 @@ struct ftrace_hash {
unsigned long count;
};
-static struct hlist_head notrace_buckets[1 << FTRACE_HASH_MAX_BITS];
-static struct ftrace_hash notrace_hash = {
- .size_bits = FTRACE_HASH_MAX_BITS,
- .buckets = notrace_buckets,
-};
-
-static struct hlist_head filter_buckets[1 << FTRACE_HASH_MAX_BITS];
-static struct ftrace_hash filter_hash = {
- .size_bits = FTRACE_HASH_MAX_BITS,
- .buckets = filter_buckets,
+/*
+ * We make these constant because no one should touch them,
+ * but they are used as the default "empty hash", to avoid allocating
+ * it all the time. These are in a read only section such that if
+ * anyone does try to modify it, it will cause an exception.
+ */
+static const struct hlist_head empty_buckets[1];
+static const struct ftrace_hash empty_hash = {
+ .buckets = (struct hlist_head *)empty_buckets,
};
+#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
struct ftrace_ops global_ops = {
.func = ftrace_stub,
- .notrace_hash = &notrace_hash,
- .filter_hash = &filter_hash,
+ .notrace_hash = EMPTY_HASH,
+ .filter_hash = EMPTY_HASH,
};
static struct dyn_ftrace *ftrace_new_addrs;
@@ -941,31 +942,38 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
return NULL;
}
-static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
+static void __add_hash_entry(struct ftrace_hash *hash,
+ struct ftrace_func_entry *entry)
{
- struct ftrace_func_entry *entry;
struct hlist_head *hhd;
unsigned long key;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
if (hash->size_bits)
- key = hash_long(ip, hash->size_bits);
+ key = hash_long(entry->ip, hash->size_bits);
else
key = 0;
- entry->ip = ip;
hhd = &hash->buckets[key];
hlist_add_head(&entry->hlist, hhd);
hash->count++;
+}
+
+static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
+{
+ struct ftrace_func_entry *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->ip = ip;
+ __add_hash_entry(hash, entry);
return 0;
}
static void
-remove_hash_entry(struct ftrace_hash *hash,
+free_hash_entry(struct ftrace_hash *hash,
struct ftrace_func_entry *entry)
{
hlist_del(&entry->hlist);
@@ -973,6 +981,14 @@ remove_hash_entry(struct ftrace_hash *hash,
hash->count--;
}
+static void
+remove_hash_entry(struct ftrace_hash *hash,
+ struct ftrace_func_entry *entry)
+{
+ hlist_del(&entry->hlist);
+ hash->count--;
+}
+
static void ftrace_hash_clear(struct ftrace_hash *hash)
{
struct hlist_head *hhd;
@@ -981,14 +997,156 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)
int size = 1 << hash->size_bits;
int i;
+ if (!hash->count)
+ return;
+
for (i = 0; i < size; i++) {
hhd = &hash->buckets[i];
hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
- remove_hash_entry(hash, entry);
+ free_hash_entry(hash, entry);
}
FTRACE_WARN_ON(hash->count);
}
+static void free_ftrace_hash(struct ftrace_hash *hash)
+{
+ if (!hash || hash == EMPTY_HASH)
+ return;
+ ftrace_hash_clear(hash);
+ kfree(hash->buckets);
+ kfree(hash);
+}
+
+static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
+{
+ struct ftrace_hash *hash;
+ int size;
+
+ hash = kzalloc(sizeof(*hash), GFP_KERNEL);
+ if (!hash)
+ return NULL;
+
+ size = 1 << size_bits;
+ hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
+
+ if (!hash->buckets) {
+ kfree(hash);
+ return NULL;
+ }
+
+ hash->size_bits = size_bits;
+
+ return hash;
+}
+
+static struct ftrace_hash *
+alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
+{
+ struct ftrace_func_entry *entry;
+ struct ftrace_hash *new_hash;
+ struct hlist_node *tp;
+ int size;
+ int ret;
+ int i;
+
+ new_hash = alloc_ftrace_hash(size_bits);
+ if (!new_hash)
+ return NULL;
+
+ /* Empty hash? */
+ if (!hash || !hash->count)
+ return new_hash;
+
+ size = 1 << hash->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
+ ret = add_hash_entry(new_hash, entry->ip);
+ if (ret < 0)
+ goto free_hash;
+ }
+ }
+
+ FTRACE_WARN_ON(new_hash->count != hash->count);
+
+ return new_hash;
+
+ free_hash:
+ free_ftrace_hash(new_hash);
+ return NULL;
+}
+
+static int
+ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+{
+ struct ftrace_func_entry *entry;
+ struct hlist_node *tp, *tn;
+ struct hlist_head *hhd;
+ struct ftrace_hash *hash = *dst;
+ unsigned long key;
+ int size = src->count;
+ int bits = 0;
+ int i;
+
+ /*
+ * If the new source is empty, just free dst and assign it
+ * the empty_hash.
+ */
+ if (!src->count) {
+ free_ftrace_hash(*dst);
+ *dst = EMPTY_HASH;
+ return 0;
+ }
+
+ ftrace_hash_clear(hash);
+
+ /*
+ * Make the hash size about 1/2 the # found
+ */
+ for (size /= 2; size; size >>= 1)
+ bits++;
+
+ /* Don't allocate too much */
+ if (bits > FTRACE_HASH_MAX_BITS)
+ bits = FTRACE_HASH_MAX_BITS;
+
+ /* We can't modify the empty_hash */
+ if (hash == EMPTY_HASH) {
+ /* Create a new hash */
+ *dst = alloc_ftrace_hash(bits);
+ if (!*dst) {
+ *dst = EMPTY_HASH;
+ return -ENOMEM;
+ }
+ hash = *dst;
+ } else {
+ size = 1 << bits;
+
+ /* Use the old hash, but create new buckets */
+ hhd = kzalloc(sizeof(*hhd) * size, GFP_KERNEL);
+ if (!hhd)
+ return -ENOMEM;
+
+ kfree(hash->buckets);
+ hash->buckets = hhd;
+ hash->size_bits = bits;
+ }
+
+ size = 1 << src->size_bits;
+ for (i = 0; i < size; i++) {
+ hhd = &src->buckets[i];
+ hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
+ if (bits > 0)
+ key = hash_long(entry->ip, bits);
+ else
+ key = 0;
+ remove_hash_entry(src, entry);
+ __add_hash_entry(hash, entry);
+ }
+ }
+
+ return 0;
+}
+
/*
* This is a double for. Do not use 'break' to break out of the loop,
* you must use a goto.
@@ -1443,6 +1601,7 @@ struct ftrace_iterator {
struct ftrace_func_probe *probe;
struct trace_parser parser;
struct ftrace_hash *hash;
+ struct ftrace_ops *ops;
int hidx;
int idx;
unsigned flags;
@@ -1742,22 +1901,37 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
else
hash = ops->filter_hash;
- iter->hash = hash;
+ iter->ops = ops;
+ iter->flags = flag;
+
+ if (file->f_mode & FMODE_WRITE) {
+ mutex_lock(&ftrace_lock);
+ iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
+ mutex_unlock(&ftrace_lock);
+
+ if (!iter->hash) {
+ trace_parser_put(&iter->parser);
+ kfree(iter);
+ return -ENOMEM;
+ }
+ }
mutex_lock(&ftrace_regex_lock);
+
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
- ftrace_filter_reset(hash);
+ ftrace_filter_reset(iter->hash);
if (file->f_mode & FMODE_READ) {
iter->pg = ftrace_pages_start;
- iter->flags = flag;
ret = seq_open(file, &show_ftrace_seq_ops);
if (!ret) {
struct seq_file *m = file->private_data;
m->private = iter;
} else {
+ /* Failed */
+ free_ftrace_hash(iter->hash);
trace_parser_put(&iter->parser);
kfree(iter);
}
@@ -1835,7 +2009,7 @@ enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
if (!entry)
return 0;
- remove_hash_entry(hash, entry);
+ free_hash_entry(hash, entry);
} else {
/* Do nothing if it exists */
if (entry)
@@ -2259,19 +2433,13 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd)
return ret;
}
-static int ftrace_process_regex(char *buff, int len, int enable)
+static int ftrace_process_regex(struct ftrace_hash *hash,
+ char *buff, int len, int enable)
{
char *func, *command, *next = buff;
- struct ftrace_ops *ops = &global_ops;
struct ftrace_func_command *p;
- struct ftrace_hash *hash;
int ret;
- if (enable)
- hash = ops->filter_hash;
- else
- hash = ops->notrace_hash;
-
func = strsep(&next, ":");
if (!next) {
@@ -2328,7 +2496,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
if (read >= 0 && trace_parser_loaded(parser) &&
!trace_parser_cont(parser)) {
- ret = ftrace_process_regex(parser->buffer,
+ ret = ftrace_process_regex(iter->hash, parser->buffer,
parser->idx, enable);
trace_parser_clear(parser);
if (ret)
@@ -2356,26 +2524,40 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf,
return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
-static void
+static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
int reset, int enable)
{
+ struct ftrace_hash **orig_hash;
struct ftrace_hash *hash;
+ int ret;
if (unlikely(ftrace_disabled))
- return;
+ return -ENODEV;
if (enable)
- hash = ops->filter_hash;
+ orig_hash = &ops->filter_hash;
else
- hash = ops->notrace_hash;
+ orig_hash = &ops->notrace_hash;
+
+ hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+ if (!hash)
+ return -ENOMEM;
mutex_lock(&ftrace_regex_lock);
if (reset)
ftrace_filter_reset(hash);
if (buf)
ftrace_match_records(hash, buf, len);
+
+ mutex_lock(&ftrace_lock);
+ ret = ftrace_hash_move(orig_hash, hash);
+ mutex_unlock(&ftrace_lock);
+
mutex_unlock(&ftrace_regex_lock);
+
+ free_ftrace_hash(hash);
+ return ret;
}
/**
@@ -2484,7 +2666,9 @@ ftrace_regex_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
struct ftrace_iterator *iter;
+ struct ftrace_hash **orig_hash;
struct trace_parser *parser;
+ int ret;
mutex_lock(&ftrace_regex_lock);
if (file->f_mode & FMODE_READ) {
@@ -2501,14 +2685,21 @@ ftrace_regex_release(struct inode *inode, struct file *file)
}
trace_parser_put(parser);
- kfree(iter);
if (file->f_mode & FMODE_WRITE) {
+ if (iter->flags & FTRACE_ITER_NOTRACE)
+ orig_hash = &iter->ops->notrace_hash;
+ else
+ orig_hash = &iter->ops->filter_hash;
+
mutex_lock(&ftrace_lock);
- if (ftrace_start_up && ftrace_enabled)
+ ret = ftrace_hash_move(orig_hash, iter->hash);
+ if (!ret && ftrace_start_up && ftrace_enabled)
ftrace_run_update_code(FTRACE_ENABLE_CALLS);
mutex_unlock(&ftrace_lock);
}
+ free_ftrace_hash(iter->hash);
+ kfree(iter);
mutex_unlock(&ftrace_regex_lock);
return 0;
--
1.7.2.3
* [RFC][PATCH 05/13] ftrace: Use counters to enable functions to trace
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
` (3 preceding siblings ...)
2011-05-06 15:26 ` [RFC][PATCH 04/13] ftrace: Separate hash allocation and assignment Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 06/13] ftrace: Add enabled_functions file Steven Rostedt
` (8 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
[-- Attachment #1: 0005-ftrace-Use-counters-to-enable-functions-to-trace.patch --]
[-- Type: text/plain, Size: 8535 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
Every function has its own record that stores the instruction
pointer and flags for the function to be traced. There are only
two flags: enabled and free. The enabled flag states that tracing
for the function has been enabled (actively traced), and the free
flag states that the record no longer points to a function and can
be used by new functions (loaded modules).
These flags are now moved to the top of the flags field
(bits 30 and 31). The remaining 30 bits are used as
a ref counter. Every time a tracer registers functions to trace,
those functions have their counters incremented.
When tracing is enabled, to determine if a function should be traced,
the counter is examined, and if it is non-zero the function is set to
be traced.
When a ftrace_ops is registered to trace functions, its hashes
are examined. If the ftrace_ops filter_hash count is zero, then
all functions are set to be traced, otherwise only the functions
in the hash are to be traced. The exception to this is if a function
is also in the ftrace_ops notrace_hash. Then that function's counter
is not incremented for this ftrace_ops.
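As a reading aid, the resulting rec->flags layout (taken from the
ftrace.h hunk below):

	/*
	 * bit 31:    FTRACE_FL_FREE    (record is unused)
	 * bit 30:    FTRACE_FL_ENABLED (call site patched in)
	 * bits 0-29: ref counter, at most FTRACE_REF_MAX
	 */
	unsigned long refs = rec->flags & ~FTRACE_FL_MASK;

A record is a candidate for tracing whenever refs is non-zero.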
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
include/linux/ftrace.h | 8 ++-
kernel/trace/ftrace.c | 158 +++++++++++++++++++++++++++++++++++++++++++-----
2 files changed, 148 insertions(+), 18 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6658a51..ab1c46e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -37,6 +37,7 @@ struct ftrace_ops {
#ifdef CONFIG_DYNAMIC_FTRACE
struct ftrace_hash *notrace_hash;
struct ftrace_hash *filter_hash;
+ unsigned long flags;
#endif
};
@@ -152,10 +153,13 @@ extern void unregister_ftrace_function_probe_all(char *glob);
extern int ftrace_text_reserved(void *start, void *end);
enum {
- FTRACE_FL_FREE = (1 << 0),
- FTRACE_FL_ENABLED = (1 << 1),
+ FTRACE_FL_ENABLED = (1 << 30),
+ FTRACE_FL_FREE = (1 << 31),
};
+#define FTRACE_FL_MASK (0x3UL << 30)
+#define FTRACE_REF_MAX ((1 << 30) - 1)
+
struct dyn_ftrace {
union {
unsigned long ip; /* address of mcount call-site */
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 46f0826..5dd332c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -890,6 +890,10 @@ static const struct ftrace_hash empty_hash = {
};
#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
+enum {
+ FTRACE_OPS_FL_ENABLED = 1,
+};
+
struct ftrace_ops global_ops = {
.func = ftrace_stub,
.notrace_hash = EMPTY_HASH,
@@ -1161,6 +1165,105 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
} \
}
+static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
+ int filter_hash,
+ bool inc)
+{
+ struct ftrace_hash *hash;
+ struct ftrace_hash *other_hash;
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+ int count = 0;
+ int all = 0;
+
+ /* Only update if the ops has been registered */
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return;
+
+ /*
+ * In the filter_hash case:
+ * If the count is zero, we update all records.
+ * Otherwise we just update the items in the hash.
+ *
+ * In the notrace_hash case:
+ * We enable the update in the hash.
+ * As disabling notrace means enabling the tracing,
+ * and enabling notrace means disabling, the inc variable
+ * gets inversed.
+ */
+ if (filter_hash) {
+ hash = ops->filter_hash;
+ other_hash = ops->notrace_hash;
+ if (!hash->count)
+ all = 1;
+ } else {
+ inc = !inc;
+ hash = ops->notrace_hash;
+ other_hash = ops->filter_hash;
+ /*
+ * If the notrace hash has no items,
+ * then there's nothing to do.
+ */
+ if (!hash->count)
+ return;
+ }
+
+ do_for_each_ftrace_rec(pg, rec) {
+ int in_other_hash = 0;
+ int in_hash = 0;
+ int match = 0;
+
+ if (all) {
+ /*
+ * Only the filter_hash affects all records.
+ * Update if the record is not in the notrace hash.
+ */
+ if (!ftrace_lookup_ip(other_hash, rec->ip))
+ match = 1;
+ } else {
+ in_hash = !!ftrace_lookup_ip(hash, rec->ip);
+ in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
+
+ /*
+ * If filter_hash is set, match records that are in the
+ * filter hash but not in the notrace hash. Otherwise
+ * (the notrace case) match records that are in the
+ * notrace hash and are also being traced: in the filter
+ * hash, or the filter hash is empty.
+ */
+ if (filter_hash && in_hash && !in_other_hash)
+ match = 1;
+ else if (!filter_hash && in_hash &&
+ (in_other_hash || !other_hash->count))
+ match = 1;
+ }
+ if (!match)
+ continue;
+
+ if (inc) {
+ rec->flags++;
+ if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
+ return;
+ } else {
+ if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
+ return;
+ rec->flags--;
+ }
+ count++;
+ /* Shortcut, if we handled all records, we are done. */
+ if (!all && count == hash->count)
+ return;
+ } while_for_each_ftrace_rec();
+}
+
+static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
+ int filter_hash)
+{
+ __ftrace_hash_rec_update(ops, filter_hash, 0);
+}
+
+static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
+ int filter_hash)
+{
+ __ftrace_hash_rec_update(ops, filter_hash, 1);
+}
+
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
rec->freelist = ftrace_free_records;
@@ -1276,26 +1379,24 @@ int ftrace_text_reserved(void *start, void *end)
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
- struct ftrace_ops *ops = &global_ops;
unsigned long ftrace_addr;
unsigned long flag = 0UL;
ftrace_addr = (unsigned long)FTRACE_ADDR;
/*
- * If this record is not to be traced or we want to disable it,
- * then disable it.
+ * If we are enabling tracing:
*
- * If we want to enable it and filtering is off, then enable it.
+ * If the record has a ref count, then we need to enable it
+ * because someone is using it.
*
- * If we want to enable it and filtering is on, enable it only if
- * it's filtered
+ * Otherwise we make sure its disabled.
+ *
+ * If we are disabling tracing, then disable all records that
+ * are enabled.
*/
- if (enable && !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) {
- if (!ops->filter_hash->count ||
- ftrace_lookup_ip(ops->filter_hash, rec->ip))
- flag = FTRACE_FL_ENABLED;
- }
+ if (enable && (rec->flags & ~FTRACE_FL_MASK))
+ flag = FTRACE_FL_ENABLED;
/* If the state of this record hasn't changed, then do nothing */
if ((rec->flags & FTRACE_FL_ENABLED) == flag)
@@ -1423,17 +1524,25 @@ static void ftrace_startup_enable(int command)
static void ftrace_startup(int command)
{
+ struct ftrace_ops *ops = &global_ops;
+
if (unlikely(ftrace_disabled))
return;
ftrace_start_up++;
command |= FTRACE_ENABLE_CALLS;
+ ops->flags |= FTRACE_OPS_FL_ENABLED;
+ if (ftrace_start_up == 1)
+ ftrace_hash_rec_enable(ops, 1);
+
ftrace_startup_enable(command);
}
static void ftrace_shutdown(int command)
{
+ struct ftrace_ops *ops = &global_ops;
+
if (unlikely(ftrace_disabled))
return;
@@ -1446,7 +1555,12 @@ static void ftrace_shutdown(int command)
WARN_ON_ONCE(ftrace_start_up < 0);
if (!ftrace_start_up)
+ ftrace_hash_rec_disable(ops, 1);
+
+ if (!ftrace_start_up) {
command |= FTRACE_DISABLE_CALLS;
+ ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+ }
if (saved_ftrace_func != ftrace_trace_function) {
saved_ftrace_func = ftrace_trace_function;
@@ -2668,6 +2782,7 @@ ftrace_regex_release(struct inode *inode, struct file *file)
struct ftrace_iterator *iter;
struct ftrace_hash **orig_hash;
struct trace_parser *parser;
+ int filter_hash;
int ret;
mutex_lock(&ftrace_regex_lock);
@@ -2687,15 +2802,26 @@ ftrace_regex_release(struct inode *inode, struct file *file)
trace_parser_put(parser);
if (file->f_mode & FMODE_WRITE) {
- if (iter->flags & FTRACE_ITER_NOTRACE)
- orig_hash = &iter->ops->notrace_hash;
- else
+ filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
+
+ if (filter_hash)
orig_hash = &iter->ops->filter_hash;
+ else
+ orig_hash = &iter->ops->notrace_hash;
mutex_lock(&ftrace_lock);
+ /*
+ * Remove the current set, update the hash and add
+ * them back.
+ */
+ ftrace_hash_rec_disable(iter->ops, filter_hash);
ret = ftrace_hash_move(orig_hash, iter->hash);
- if (!ret && ftrace_start_up && ftrace_enabled)
- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+ if (!ret) {
+ ftrace_hash_rec_enable(iter->ops, filter_hash);
+ if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
+ && ftrace_enabled)
+ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+ }
mutex_unlock(&ftrace_lock);
}
free_ftrace_hash(iter->hash);
--
1.7.2.3
* [RFC][PATCH 06/13] ftrace: Add enabled_functions file
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
` (4 preceding siblings ...)
2011-05-06 15:26 ` [RFC][PATCH 05/13] ftrace: Use counters to enable functions to trace Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 07/13] ftrace: Add ops parameter to ftrace_startup/shutdown functions Steven Rostedt
` (7 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
[-- Attachment #1: 0006-ftrace-Add-enabled_functions-file.patch --]
[-- Type: text/plain, Size: 3083 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
Add the enabled_functions file, which shows all the functions that
have been enabled for tracing along with their ref counts. This helps
to see whether any function has been registered and which functions
are being traced.
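A usage sketch (assuming debugfs is mounted at /sys/kernel/debug; the
exact output depends on what has been registered):

	# echo schedule > /sys/kernel/debug/tracing/set_ftrace_filter
	# echo function > /sys/kernel/debug/tracing/current_tracer
	# cat /sys/kernel/debug/tracing/enabled_functions
	schedule (1)

Each line is the function name followed by its ref count, as printed
by the t_show() change below.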
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/ftrace.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 49 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5dd332c..065f1e6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1703,6 +1703,7 @@ enum {
FTRACE_ITER_NOTRACE = (1 << 1),
FTRACE_ITER_PRINTALL = (1 << 2),
FTRACE_ITER_HASH = (1 << 3),
+ FTRACE_ITER_ENABLED = (1 << 4),
};
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
@@ -1842,7 +1843,11 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
!(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
((iter->flags & FTRACE_ITER_NOTRACE) &&
- !ftrace_lookup_ip(ops->notrace_hash, rec->ip))) {
+ !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
+
+ ((iter->flags & FTRACE_ITER_ENABLED) &&
+ !(rec->flags & ~FTRACE_FL_MASK))) {
+
rec = NULL;
goto retry;
}
@@ -1944,7 +1949,11 @@ static int t_show(struct seq_file *m, void *v)
if (!rec)
return 0;
- seq_printf(m, "%ps\n", (void *)rec->ip);
+ seq_printf(m, "%ps", (void *)rec->ip);
+ if (iter->flags & FTRACE_ITER_ENABLED)
+ seq_printf(m, " (%ld)",
+ rec->flags & ~FTRACE_FL_MASK);
+ seq_printf(m, "\n");
return 0;
}
@@ -1983,6 +1992,34 @@ ftrace_avail_open(struct inode *inode, struct file *file)
return ret;
}
+static int
+ftrace_enabled_open(struct inode *inode, struct file *file)
+{
+ struct ftrace_iterator *iter;
+ int ret;
+
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+
+ iter->pg = ftrace_pages_start;
+ iter->flags = FTRACE_ITER_ENABLED;
+
+ ret = seq_open(file, &show_ftrace_seq_ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+
+ m->private = iter;
+ } else {
+ kfree(iter);
+ }
+
+ return ret;
+}
+
static void ftrace_filter_reset(struct ftrace_hash *hash)
{
mutex_lock(&ftrace_lock);
@@ -2838,6 +2875,13 @@ static const struct file_operations ftrace_avail_fops = {
.release = seq_release_private,
};
+static const struct file_operations ftrace_enabled_fops = {
+ .open = ftrace_enabled_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
static const struct file_operations ftrace_filter_fops = {
.open = ftrace_filter_open,
.read = seq_read,
@@ -3069,6 +3113,9 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
trace_create_file("available_filter_functions", 0444,
d_tracer, NULL, &ftrace_avail_fops);
+ trace_create_file("enabled_functions", 0444,
+ d_tracer, NULL, &ftrace_enabled_fops);
+
trace_create_file("set_ftrace_filter", 0644, d_tracer,
NULL, &ftrace_filter_fops);
--
1.7.2.3
* [RFC][PATCH 07/13] ftrace: Add ops parameter to ftrace_startup/shutdown functions
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
` (5 preceding siblings ...)
2011-05-06 15:26 ` [RFC][PATCH 06/13] ftrace: Add enabled_functions file Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 08/13] ftrace: Have global_ops store the functions that are to be traced Steven Rostedt
` (6 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
[-- Attachment #1: 0007-ftrace-Add-ops-parameter-to-ftrace_startup-shutdown-.patch --]
[-- Type: text/plain, Size: 3602 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
In order to allow different ops to enable different functions,
the ftrace_startup() and ftrace_shutdown() functions need the
ops parameter passed to them.
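All current callers simply pass &global_ops, so behavior is unchanged;
for example:

	- ftrace_startup(0);
	+ ftrace_startup(&global_ops, 0);

Later patches use the ops argument to enable only the functions hashed
in the given ops.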
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/ftrace.c | 28 ++++++++++++++--------------
1 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 065f1e6..8fef1d9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1522,10 +1522,8 @@ static void ftrace_startup_enable(int command)
ftrace_run_update_code(command);
}
-static void ftrace_startup(int command)
+static void ftrace_startup(struct ftrace_ops *ops, int command)
{
- struct ftrace_ops *ops = &global_ops;
-
if (unlikely(ftrace_disabled))
return;
@@ -1539,10 +1537,8 @@ static void ftrace_startup(int command)
ftrace_startup_enable(command);
}
-static void ftrace_shutdown(int command)
+static void ftrace_shutdown(struct ftrace_ops *ops, int command)
{
- struct ftrace_ops *ops = &global_ops;
-
if (unlikely(ftrace_disabled))
return;
@@ -2362,7 +2358,7 @@ static void __enable_ftrace_function_probe(void)
return;
__register_ftrace_function(&trace_probe_ops);
- ftrace_startup(0);
+ ftrace_startup(&global_ops, 0);
ftrace_probe_registered = 1;
}
@@ -2381,7 +2377,7 @@ static void __disable_ftrace_function_probe(void)
/* no more funcs left */
__unregister_ftrace_function(&trace_probe_ops);
- ftrace_shutdown(0);
+ ftrace_shutdown(&global_ops, 0);
ftrace_probe_registered = 0;
}
@@ -3267,6 +3263,10 @@ void __init ftrace_init(void)
#else
+struct ftrace_ops global_ops = {
+ .func = ftrace_stub,
+};
+
static int __init ftrace_nodyn_init(void)
{
ftrace_enabled = 1;
@@ -3277,8 +3277,8 @@ device_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(command) do { } while (0)
-# define ftrace_shutdown(command) do { } while (0)
+# define ftrace_startup(ops, command) do { } while (0)
+# define ftrace_shutdown(ops, command) do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
@@ -3583,7 +3583,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
goto out_unlock;
ret = __register_ftrace_function(ops);
- ftrace_startup(0);
+ ftrace_startup(&global_ops, 0);
out_unlock:
mutex_unlock(&ftrace_lock);
@@ -3602,7 +3602,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
mutex_lock(&ftrace_lock);
ret = __unregister_ftrace_function(ops);
- ftrace_shutdown(0);
+ ftrace_shutdown(&global_ops, 0);
mutex_unlock(&ftrace_lock);
return ret;
@@ -3825,7 +3825,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
ftrace_graph_return = retfunc;
ftrace_graph_entry = entryfunc;
- ftrace_startup(FTRACE_START_FUNC_RET);
+ ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
out:
mutex_unlock(&ftrace_lock);
@@ -3842,7 +3842,7 @@ void unregister_ftrace_graph(void)
ftrace_graph_active--;
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
ftrace_graph_entry = ftrace_graph_entry_stub;
- ftrace_shutdown(FTRACE_STOP_FUNC_RET);
+ ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
unregister_pm_notifier(&ftrace_suspend_notifier);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
--
1.7.2.3
* [RFC][PATCH 08/13] ftrace: Have global_ops store the functions that are to be traced
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
` (6 preceding siblings ...)
2011-05-06 15:26 ` [RFC][PATCH 07/13] ftrace: Add ops parameter to ftrace_startup/shutdown functions Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 09/13] ftrace: Free hash with call_rcu_sched() Steven Rostedt
` (5 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
[-- Attachment #1: 0008-ftrace-Have-global_ops-store-the-functions-that-are-.patch --]
[-- Type: text/plain, Size: 4116 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
This is a step towards each ops structure defining its own set
of functions to trace. As the current code for pid filtering and
the like is specific to the global_ops, it is restructured to be
used through the global ops.
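As an illustration (not part of this patch): because the helpers now take
the list as an argument, the same code can maintain any number of independent
ops lists. my_list and the example_* functions below are made-up names.

static struct ftrace_ops *my_list = &ftrace_list_end;

static int example_register(struct ftrace_ops *ops)
{
        /* publish: ops->next is set before the list head points at ops */
        add_ftrace_ops(&my_list, ops);
        return 0;
}

static int example_unregister(struct ftrace_ops *ops)
{
        /* remove_ftrace_ops() returns -1 if ops was not on the list */
        return remove_ftrace_ops(&my_list, ops);
}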
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/ftrace.c | 69 +++++++++++++++++++++++++++++++++++++-----------
1 files changed, 53 insertions(+), 16 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8fef1d9..dcce0bf 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -91,6 +91,7 @@ static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
+static struct ftrace_ops global_ops;
/*
* Traverse the ftrace_list, invoking all entries. The reason that we
@@ -153,7 +154,7 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
}
#endif
-static void update_ftrace_function(void)
+static void update_global_ops(void)
{
ftrace_func_t func;
@@ -173,6 +174,18 @@ static void update_ftrace_function(void)
set_ftrace_pid_function(func);
func = ftrace_pid_func;
}
+
+ global_ops.func = func;
+}
+
+static void update_ftrace_function(void)
+{
+ ftrace_func_t func;
+
+ update_global_ops();
+
+ func = global_ops.func;
+
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
ftrace_trace_function = func;
#else
@@ -181,24 +194,19 @@ static void update_ftrace_function(void)
#endif
}
-static int __register_ftrace_function(struct ftrace_ops *ops)
+static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
- ops->next = ftrace_list;
+ ops->next = *list;
/*
* We are entering ops into the ftrace_list but another
* CPU might be walking that list. We need to make sure
* the ops->next pointer is valid before another CPU sees
* the ops pointer included into the ftrace_list.
*/
- rcu_assign_pointer(ftrace_list, ops);
-
- if (ftrace_enabled)
- update_ftrace_function();
-
- return 0;
+ rcu_assign_pointer(*list, ops);
}
-static int __unregister_ftrace_function(struct ftrace_ops *ops)
+static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
struct ftrace_ops **p;
@@ -206,13 +214,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
* If we are removing the last function, then simply point
* to the ftrace_stub.
*/
- if (ftrace_list == ops && ops->next == &ftrace_list_end) {
- ftrace_trace_function = ftrace_stub;
- ftrace_list = &ftrace_list_end;
+ if (*list == ops && ops->next == &ftrace_list_end) {
+ *list = &ftrace_list_end;
return 0;
}
- for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
+ for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
if (*p == ops)
break;
@@ -220,7 +227,37 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
return -1;
*p = (*p)->next;
+ return 0;
+}
+
+static int __register_ftrace_function(struct ftrace_ops *ops)
+{
+ if (ftrace_disabled)
+ return -ENODEV;
+
+ if (FTRACE_WARN_ON(ops == &global_ops))
+ return -EINVAL;
+
+ add_ftrace_ops(&ftrace_list, ops);
+ if (ftrace_enabled)
+ update_ftrace_function();
+
+ return 0;
+}
+static int __unregister_ftrace_function(struct ftrace_ops *ops)
+{
+ int ret;
+
+ if (ftrace_disabled)
+ return -ENODEV;
+
+ if (FTRACE_WARN_ON(ops == &global_ops))
+ return -EINVAL;
+
+ ret = remove_ftrace_ops(&ftrace_list, ops);
+ if (ret < 0)
+ return ret;
if (ftrace_enabled)
update_ftrace_function();
@@ -894,7 +931,7 @@ enum {
FTRACE_OPS_FL_ENABLED = 1,
};
-struct ftrace_ops global_ops = {
+static struct ftrace_ops global_ops = {
.func = ftrace_stub,
.notrace_hash = EMPTY_HASH,
.filter_hash = EMPTY_HASH,
@@ -3263,7 +3300,7 @@ void __init ftrace_init(void)
#else
-struct ftrace_ops global_ops = {
+static struct ftrace_ops global_ops = {
.func = ftrace_stub,
};
--
1.7.2.3
* [RFC][PATCH 09/13] ftrace: Free hash with call_rcu_sched()
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
` (7 preceding siblings ...)
2011-05-06 15:26 ` [RFC][PATCH 08/13] ftrace: Have global_ops store the functions that are to be traced Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 10/13] ftrace: Implement separate user function filtering Steven Rostedt
` (4 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin, Paul E. McKenney
[-- Attachment #1: 0009-ftrace-Free-hash-with-call_rcu_sched.patch --]
[-- Type: text/plain, Size: 3112 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
When a hash is modified while it might still be in use, it must be
freed via a sched RCU grace period (call_rcu_sched()), as the hashes
will soon be dereferenced directly in the function tracer callback.
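A minimal sketch of the resulting writer/reader pairing; example_writer()
and example_reader() are illustrative names only, while free_ftrace_hash_rcu()
and ftrace_lookup_ip() are the real helpers:

static void example_writer(struct ftrace_hash **dst, struct ftrace_hash *new_hash)
{
        struct ftrace_hash *old_hash = *dst;

        rcu_assign_pointer(*dst, new_hash);     /* publish the replacement */
        free_ftrace_hash_rcu(old_hash);         /* kfree() happens only after a sched-RCU grace period */
}

static int example_reader(struct ftrace_ops *ops, unsigned long ip)
{
        struct ftrace_hash *hash;
        int hit;

        preempt_disable_notrace();              /* sched-RCU read-side section */
        hash = rcu_dereference_raw(ops->filter_hash);
        hit = hash && ftrace_lookup_ip(hash, ip);
        preempt_enable_notrace();

        return hit;
}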
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/ftrace.c | 55 +++++++++++++++++++++++++------------------------
1 files changed, 28 insertions(+), 27 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index dcce0bf..92b6fdf 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -913,6 +913,7 @@ struct ftrace_hash {
unsigned long size_bits;
struct hlist_head *buckets;
unsigned long count;
+ struct rcu_head rcu;
};
/*
@@ -1058,6 +1059,21 @@ static void free_ftrace_hash(struct ftrace_hash *hash)
kfree(hash);
}
+static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
+{
+ struct ftrace_hash *hash;
+
+ hash = container_of(rcu, struct ftrace_hash, rcu);
+ free_ftrace_hash(hash);
+}
+
+static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
+{
+ if (!hash || hash == EMPTY_HASH)
+ return;
+ call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
+}
+
static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
struct ftrace_hash *hash;
@@ -1122,7 +1138,8 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
struct ftrace_func_entry *entry;
struct hlist_node *tp, *tn;
struct hlist_head *hhd;
- struct ftrace_hash *hash = *dst;
+ struct ftrace_hash *old_hash;
+ struct ftrace_hash *new_hash;
unsigned long key;
int size = src->count;
int bits = 0;
@@ -1133,13 +1150,11 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
* the empty_hash.
*/
if (!src->count) {
- free_ftrace_hash(*dst);
- *dst = EMPTY_HASH;
+ free_ftrace_hash_rcu(*dst);
+ rcu_assign_pointer(*dst, EMPTY_HASH);
return 0;
}
- ftrace_hash_clear(hash);
-
/*
* Make the hash size about 1/2 the # found
*/
@@ -1150,27 +1165,9 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
if (bits > FTRACE_HASH_MAX_BITS)
bits = FTRACE_HASH_MAX_BITS;
- /* We can't modify the empty_hash */
- if (hash == EMPTY_HASH) {
- /* Create a new hash */
- *dst = alloc_ftrace_hash(bits);
- if (!*dst) {
- *dst = EMPTY_HASH;
- return -ENOMEM;
- }
- hash = *dst;
- } else {
- size = 1 << bits;
-
- /* Use the old hash, but create new buckets */
- hhd = kzalloc(sizeof(*hhd) * size, GFP_KERNEL);
- if (!hhd)
- return -ENOMEM;
-
- kfree(hash->buckets);
- hash->buckets = hhd;
- hash->size_bits = bits;
- }
+ new_hash = alloc_ftrace_hash(bits);
+ if (!new_hash)
+ return -ENOMEM;
size = 1 << src->size_bits;
for (i = 0; i < size; i++) {
@@ -1181,10 +1178,14 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
else
key = 0;
remove_hash_entry(src, entry);
- __add_hash_entry(hash, entry);
+ __add_hash_entry(new_hash, entry);
}
}
+ old_hash = *dst;
+ rcu_assign_pointer(*dst, new_hash);
+ free_ftrace_hash_rcu(old_hash);
+
return 0;
}
--
1.7.2.3
* [RFC][PATCH 10/13] ftrace: Implement separate user function filtering
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
` (8 preceding siblings ...)
2011-05-06 15:26 ` [RFC][PATCH 09/13] ftrace: Free hash with call_rcu_sched() Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 11/13] ftrace: Allow dynamically allocated function tracers Steven Rostedt
` (3 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin, Paul E. McKenney
[-- Attachment #1: 0010-ftrace-Implement-separate-user-function-filtering.patch --]
[-- Type: text/plain, Size: 14971 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
ftrace_ops that are registered to trace functions can now be
agnostic to each other with respect to the functions they trace.
Each ops has its own hash of the functions it wants to trace and
a hash of the functions it does not want to trace. An empty
filter hash means that every function should be traced, except
those in the notrace hash.
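Reading the rule as a single predicate may help; the sketch below is
equivalent to the ftrace_ops_test() added by this patch (minus the
preemption/RCU protection around the hash accesses), and
example_should_trace() is an illustrative name:

static bool example_should_trace(struct ftrace_ops *ops, unsigned long ip)
{
        struct ftrace_hash *filter = ops->filter_hash;
        struct ftrace_hash *notrace = ops->notrace_hash;

        /* a non-empty filter hash must contain ip */
        if (filter && filter->count && !ftrace_lookup_ip(filter, ip))
                return false;

        /* the notrace hash must not contain ip */
        if (notrace && notrace->count && ftrace_lookup_ip(notrace, ip))
                return false;

        return true;
}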
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
include/linux/ftrace.h | 7 +-
kernel/trace/ftrace.c | 193 +++++++++++++++++++++++++++++-------
kernel/trace/trace_functions.c | 2 +
kernel/trace/trace_irqsoff.c | 1 +
kernel/trace/trace_sched_wakeup.c | 1 +
kernel/trace/trace_stack.c | 1 +
6 files changed, 166 insertions(+), 39 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index ab1c46e..4609c0e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -31,13 +31,18 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
struct ftrace_hash;
+enum {
+ FTRACE_OPS_FL_ENABLED = 1 << 0,
+ FTRACE_OPS_FL_GLOBAL = 1 << 1,
+};
+
struct ftrace_ops {
ftrace_func_t func;
struct ftrace_ops *next;
+ unsigned long flags;
#ifdef CONFIG_DYNAMIC_FTRACE
struct ftrace_hash *notrace_hash;
struct ftrace_hash *filter_hash;
- unsigned long flags;
#endif
};
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 92b6fdf..6c7e1df 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -87,24 +87,29 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
.func = ftrace_stub,
};
-static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
+static void
+ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
+
/*
- * Traverse the ftrace_list, invoking all entries. The reason that we
+ * Traverse the ftrace_global_list, invoking all entries. The reason that we
* can use rcu_dereference_raw() is that elements removed from this list
* are simply leaked, so there is no need to interact with a grace-period
* mechanism. The rcu_dereference_raw() calls are needed to handle
- * concurrent insertions into the ftrace_list.
+ * concurrent insertions into the ftrace_global_list.
*
* Silly Alpha and silly pointer-speculation compiler optimizations!
*/
-static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_global_list_func(unsigned long ip,
+ unsigned long parent_ip)
{
- struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
+ struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/
while (op != &ftrace_list_end) {
op->func(ip, parent_ip);
@@ -163,11 +168,11 @@ static void update_global_ops(void)
* function directly. Otherwise, we need to iterate over the
* registered callers.
*/
- if (ftrace_list == &ftrace_list_end ||
- ftrace_list->next == &ftrace_list_end)
- func = ftrace_list->func;
+ if (ftrace_global_list == &ftrace_list_end ||
+ ftrace_global_list->next == &ftrace_list_end)
+ func = ftrace_global_list->func;
else
- func = ftrace_list_func;
+ func = ftrace_global_list_func;
/* If we filter on pids, update to use the pid function */
if (!list_empty(&ftrace_pids)) {
@@ -184,7 +189,11 @@ static void update_ftrace_function(void)
update_global_ops();
- func = global_ops.func;
+ if (ftrace_ops_list == &ftrace_list_end ||
+ ftrace_ops_list->next == &ftrace_list_end)
+ func = ftrace_ops_list->func;
+ else
+ func = ftrace_ops_list_func;
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
ftrace_trace_function = func;
@@ -198,10 +207,10 @@ static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
ops->next = *list;
/*
- * We are entering ops into the ftrace_list but another
+ * We are entering ops into the list but another
* CPU might be walking that list. We need to make sure
* the ops->next pointer is valid before another CPU sees
- * the ops pointer included into the ftrace_list.
+ * the ops pointer included into the list.
*/
rcu_assign_pointer(*list, ops);
}
@@ -238,7 +247,18 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
if (FTRACE_WARN_ON(ops == &global_ops))
return -EINVAL;
- add_ftrace_ops(&ftrace_list, ops);
+ if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return -EBUSY;
+
+ if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+ int first = ftrace_global_list == &ftrace_list_end;
+ add_ftrace_ops(&ftrace_global_list, ops);
+ ops->flags |= FTRACE_OPS_FL_ENABLED;
+ if (first)
+ add_ftrace_ops(&ftrace_ops_list, &global_ops);
+ } else
+ add_ftrace_ops(&ftrace_ops_list, ops);
+
if (ftrace_enabled)
update_ftrace_function();
@@ -252,12 +272,24 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
if (ftrace_disabled)
return -ENODEV;
+ if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
+ return -EBUSY;
+
if (FTRACE_WARN_ON(ops == &global_ops))
return -EINVAL;
- ret = remove_ftrace_ops(&ftrace_list, ops);
+ if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+ ret = remove_ftrace_ops(&ftrace_global_list, ops);
+ if (!ret && ftrace_global_list == &ftrace_list_end)
+ ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
+ if (!ret)
+ ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+ } else
+ ret = remove_ftrace_ops(&ftrace_ops_list, ops);
+
if (ret < 0)
return ret;
+
if (ftrace_enabled)
update_ftrace_function();
@@ -928,10 +960,6 @@ static const struct ftrace_hash empty_hash = {
};
#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
-enum {
- FTRACE_OPS_FL_ENABLED = 1,
-};
-
static struct ftrace_ops global_ops = {
.func = ftrace_stub,
.notrace_hash = EMPTY_HASH,
@@ -1190,6 +1218,40 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
}
/*
+ * Test the hashes for this ops to see if we want to call
+ * the ops->func or not.
+ *
+ * It's a match if the ip is in the ops->filter_hash or
+ * the filter_hash does not exist or is empty,
+ * AND
+ * the ip is not in the ops->notrace_hash.
+ */
+static int
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+{
+ struct ftrace_hash *filter_hash;
+ struct ftrace_hash *notrace_hash;
+ int ret;
+
+ /* The hashes are freed with call_rcu_sched() */
+ preempt_disable_notrace();
+
+ filter_hash = rcu_dereference_raw(ops->filter_hash);
+ notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+
+ if ((!filter_hash || !filter_hash->count ||
+ ftrace_lookup_ip(filter_hash, ip)) &&
+ (!notrace_hash || !notrace_hash->count ||
+ !ftrace_lookup_ip(notrace_hash, ip)))
+ ret = 1;
+ else
+ ret = 0;
+ preempt_enable_notrace();
+
+ return ret;
+}
+
+/*
* This is a double for. Do not use 'break' to break out of the loop,
* you must use a goto.
*/
@@ -1232,7 +1294,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
if (filter_hash) {
hash = ops->filter_hash;
other_hash = ops->notrace_hash;
- if (!hash->count)
+ if (!hash || !hash->count)
all = 1;
} else {
inc = !inc;
@@ -1242,7 +1304,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
* If the notrace hash has no items,
* then there's nothing to do.
*/
- if (!hash->count)
+ if (hash && !hash->count)
return;
}
@@ -1256,11 +1318,11 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
* Only the filter_hash affects all records.
* Update if the record is not in the notrace hash.
*/
- if (!ftrace_lookup_ip(other_hash, rec->ip))
+ if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
match = 1;
} else {
- in_hash = !!ftrace_lookup_ip(hash, rec->ip);
- in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
+ in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
+ in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
/*
*
@@ -1546,6 +1608,7 @@ static void ftrace_run_update_code(int command)
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
+static int global_start_up;
static void ftrace_startup_enable(int command)
{
@@ -1562,14 +1625,25 @@ static void ftrace_startup_enable(int command)
static void ftrace_startup(struct ftrace_ops *ops, int command)
{
+ bool hash_enable = true;
+
if (unlikely(ftrace_disabled))
return;
ftrace_start_up++;
command |= FTRACE_ENABLE_CALLS;
+ /* ops marked global share the filter hashes */
+ if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+ ops = &global_ops;
+ /* Don't update hash if global is already set */
+ if (global_start_up)
+ hash_enable = false;
+ global_start_up++;
+ }
+
ops->flags |= FTRACE_OPS_FL_ENABLED;
- if (ftrace_start_up == 1)
+ if (hash_enable)
ftrace_hash_rec_enable(ops, 1);
ftrace_startup_enable(command);
@@ -1577,6 +1651,8 @@ static void ftrace_startup(struct ftrace_ops *ops, int command)
static void ftrace_shutdown(struct ftrace_ops *ops, int command)
{
+ bool hash_disable = true;
+
if (unlikely(ftrace_disabled))
return;
@@ -1588,13 +1664,25 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
*/
WARN_ON_ONCE(ftrace_start_up < 0);
- if (!ftrace_start_up)
+ if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+ ops = &global_ops;
+ global_start_up--;
+ WARN_ON_ONCE(global_start_up < 0);
+ /* Don't update hash if global still has users */
+ if (global_start_up) {
+ WARN_ON_ONCE(!ftrace_start_up);
+ hash_disable = false;
+ }
+ }
+
+ if (hash_disable)
ftrace_hash_rec_disable(ops, 1);
- if (!ftrace_start_up) {
- command |= FTRACE_DISABLE_CALLS;
+ if (ops != &global_ops || !global_start_up)
ops->flags &= ~FTRACE_OPS_FL_ENABLED;
- }
+
+ if (!ftrace_start_up)
+ command |= FTRACE_DISABLE_CALLS;
if (saved_ftrace_func != ftrace_trace_function) {
saved_ftrace_func = ftrace_trace_function;
@@ -2381,6 +2469,7 @@ static int ftrace_probe_registered;
static void __enable_ftrace_function_probe(void)
{
+ int ret;
int i;
if (ftrace_probe_registered)
@@ -2395,13 +2484,16 @@ static void __enable_ftrace_function_probe(void)
if (i == FTRACE_FUNC_HASHSIZE)
return;
- __register_ftrace_function(&trace_probe_ops);
- ftrace_startup(&global_ops, 0);
+ ret = __register_ftrace_function(&trace_probe_ops);
+ if (!ret)
+ ftrace_startup(&trace_probe_ops, 0);
+
ftrace_probe_registered = 1;
}
static void __disable_ftrace_function_probe(void)
{
+ int ret;
int i;
if (!ftrace_probe_registered)
@@ -2414,8 +2506,10 @@ static void __disable_ftrace_function_probe(void)
}
/* no more funcs left */
- __unregister_ftrace_function(&trace_probe_ops);
- ftrace_shutdown(&global_ops, 0);
+ ret = __unregister_ftrace_function(&trace_probe_ops);
+ if (!ret)
+ ftrace_shutdown(&trace_probe_ops, 0);
+
ftrace_probe_registered = 0;
}
@@ -3319,8 +3413,28 @@ static inline void ftrace_startup_enable(int command) { }
# define ftrace_shutdown(ops, command) do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
+
+static inline int
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+{
+ return 1;
+}
+
#endif /* CONFIG_DYNAMIC_FTRACE */
+static void
+ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
+{
+ /* see comment above ftrace_global_list_func */
+ struct ftrace_ops *op = rcu_dereference_raw(ftrace_ops_list);
+
+ while (op != &ftrace_list_end) {
+ if (ftrace_ops_test(op, ip))
+ op->func(ip, parent_ip);
+ op = rcu_dereference_raw(op->next);
+ };
+}
+
static void clear_ftrace_swapper(void)
{
struct task_struct *p;
@@ -3621,7 +3735,9 @@ int register_ftrace_function(struct ftrace_ops *ops)
goto out_unlock;
ret = __register_ftrace_function(ops);
- ftrace_startup(&global_ops, 0);
+ if (!ret)
+ ftrace_startup(ops, 0);
+
out_unlock:
mutex_unlock(&ftrace_lock);
@@ -3640,7 +3756,8 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
mutex_lock(&ftrace_lock);
ret = __unregister_ftrace_function(ops);
- ftrace_shutdown(&global_ops, 0);
+ if (!ret)
+ ftrace_shutdown(ops, 0);
mutex_unlock(&ftrace_lock);
return ret;
@@ -3670,11 +3787,11 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
ftrace_startup_sysctl();
/* we are starting ftrace again */
- if (ftrace_list != &ftrace_list_end) {
- if (ftrace_list->next == &ftrace_list_end)
- ftrace_trace_function = ftrace_list->func;
+ if (ftrace_ops_list != &ftrace_list_end) {
+ if (ftrace_ops_list->next == &ftrace_list_end)
+ ftrace_trace_function = ftrace_ops_list->func;
else
- ftrace_trace_function = ftrace_list_func;
+ ftrace_trace_function = ftrace_ops_list_func;
}
} else {
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 16aee4d..8d0e1cc 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -149,11 +149,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly =
{
.func = function_trace_call,
+ .flags = FTRACE_OPS_FL_GLOBAL,
};
static struct ftrace_ops trace_stack_ops __read_mostly =
{
.func = function_stack_trace_call,
+ .flags = FTRACE_OPS_FL_GLOBAL,
};
/* Our two options */
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a4969b4..c77424b 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -153,6 +153,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly =
{
.func = irqsoff_tracer_call,
+ .flags = FTRACE_OPS_FL_GLOBAL,
};
#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 7319559..f029dd4 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -129,6 +129,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly =
{
.func = wakeup_tracer_call,
+ .flags = FTRACE_OPS_FL_GLOBAL,
};
#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 4c5dead..b0b53b8 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -133,6 +133,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly =
{
.func = stack_trace_call,
+ .flags = FTRACE_OPS_FL_GLOBAL,
};
static ssize_t
--
1.7.2.3
* [RFC][PATCH 11/13] ftrace: Allow dynamically allocated function tracers
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
` (9 preceding siblings ...)
2011-05-06 15:26 ` [RFC][PATCH 10/13] ftrace: Implement separate user function filtering Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 12/13] ftrace: Modify ftrace_set_filter/notrace to take ops Steven Rostedt
` (2 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin, Paul E. McKenney
[-- Attachment #1: 0011-ftrace-Allow-dynamically-allocated-function-tracers.patch --]
[-- Type: text/plain, Size: 6126 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
Now that functions may be selected individually, it only makes sense
to allow dynamically allocated ftrace_ops structures to be used for
tracing as well. This will allow perf to allocate an ftrace_ops
structure at runtime and use it to pick and choose which functions
that structure will trace.
Note, a dynamically allocated ftrace_ops will always be called
indirectly instead of directly from the mcount code in entry.S.
This is because there is no safe way to prevent mcount from being
preempted before calling the callback, short of modifying every
entry.S to do so (not likely). Thus, the callbacks of dynamically
allocated ftrace_ops are invoked through ftrace_ops_list_func(),
which loops over the registered ops, even when only one such op
is registered. The loop is protected with preempt_disable.
To determine whether an ftrace_ops structure is dynamically
allocated, a new utility function, core_kernel_data(), was added
to kernel/extable.c; it returns 1 if the address lies between
_sdata and _edata.
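A sketch of the intended lifecycle of such a dynamically allocated ops
(hypothetical caller, not part of the patch; my_callback and the example_*
names are made up):

static void my_callback(unsigned long ip, unsigned long parent_ip)
{
        /* invoked via ftrace_ops_list_func(), with preemption disabled */
}

static struct ftrace_ops *example_start_tracer(void)
{
        struct ftrace_ops *ops = kzalloc(sizeof(*ops), GFP_KERNEL);

        if (!ops)
                return NULL;

        ops->func = my_callback;
        /* core_kernel_data() is false for kmalloc'd memory, so
         * FTRACE_OPS_FL_DYNAMIC is set automatically on registration */
        if (register_ftrace_function(ops)) {
                kfree(ops);
                return NULL;
        }
        return ops;
}

static void example_stop_tracer(struct ftrace_ops *ops)
{
        /* unregistering implies synchronize_sched(), so freeing is safe */
        unregister_ftrace_function(ops);
        kfree(ops);
}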
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
include/linux/ftrace.h | 1 +
include/linux/kernel.h | 1 +
kernel/extable.c | 8 ++++++++
kernel/trace/ftrace.c | 37 ++++++++++++++++++++++++++++++-------
4 files changed, 40 insertions(+), 7 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 4609c0e..caba694 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -34,6 +34,7 @@ struct ftrace_hash;
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
FTRACE_OPS_FL_GLOBAL = 1 << 1,
+ FTRACE_OPS_FL_DYNAMIC = 1 << 2,
};
struct ftrace_ops {
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 00cec4d..f37ba71 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -283,6 +283,7 @@ extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern int core_kernel_text(unsigned long addr);
+extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
diff --git a/kernel/extable.c b/kernel/extable.c
index 7f8f263..c2d625f 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -72,6 +72,14 @@ int core_kernel_text(unsigned long addr)
return 0;
}
+int core_kernel_data(unsigned long addr)
+{
+ if (addr >= (unsigned long)_sdata &&
+ addr < (unsigned long)_edata)
+ return 1;
+ return 0;
+}
+
int __kernel_text_address(unsigned long addr)
{
if (core_kernel_text(addr))
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6c7e1df..5b3ee04 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -189,8 +189,14 @@ static void update_ftrace_function(void)
update_global_ops();
+ /*
+ * If we are at the end of the list and this ops is
+ * not dynamic, then have the mcount trampoline call
+ * the function directly
+ */
if (ftrace_ops_list == &ftrace_list_end ||
- ftrace_ops_list->next == &ftrace_list_end)
+ (ftrace_ops_list->next == &ftrace_list_end &&
+ !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
func = ftrace_ops_list->func;
else
func = ftrace_ops_list_func;
@@ -250,6 +256,9 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
return -EBUSY;
+ if (!core_kernel_data((unsigned long)ops))
+ ops->flags |= FTRACE_OPS_FL_DYNAMIC;
+
if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
int first = ftrace_global_list == &ftrace_list_end;
add_ftrace_ops(&ftrace_global_list, ops);
@@ -293,6 +302,13 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
if (ftrace_enabled)
update_ftrace_function();
+ /*
+ * Dynamic ops may be freed, we must make sure that all
+ * callers are done before leaving this function.
+ */
+ if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+ synchronize_sched();
+
return 0;
}
@@ -1225,6 +1241,9 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
* the filter_hash does not exist or is empty,
* AND
* the ip is not in the ops->notrace_hash.
+ *
+ * This needs to be called with preemption disabled as
+ * the hashes are freed with call_rcu_sched().
*/
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
@@ -1233,9 +1252,6 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
struct ftrace_hash *notrace_hash;
int ret;
- /* The hashes are freed with call_rcu_sched() */
- preempt_disable_notrace();
-
filter_hash = rcu_dereference_raw(ops->filter_hash);
notrace_hash = rcu_dereference_raw(ops->notrace_hash);
@@ -1246,7 +1262,6 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
ret = 1;
else
ret = 0;
- preempt_enable_notrace();
return ret;
}
@@ -3425,14 +3440,20 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
- /* see comment above ftrace_global_list_func */
- struct ftrace_ops *op = rcu_dereference_raw(ftrace_ops_list);
+ struct ftrace_ops *op;
+ /*
+ * Some of the ops may be dynamically allocated,
+ * they must be freed after a synchronize_sched().
+ */
+ preempt_disable_notrace();
+ op = rcu_dereference_raw(ftrace_ops_list);
while (op != &ftrace_list_end) {
if (ftrace_ops_test(op, ip))
op->func(ip, parent_ip);
op = rcu_dereference_raw(op->next);
};
+ preempt_enable_notrace();
}
static void clear_ftrace_swapper(void)
@@ -3743,6 +3764,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
mutex_unlock(&ftrace_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(register_ftrace_function);
/**
* unregister_ftrace_function - unregister a function for profiling.
@@ -3762,6 +3784,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
return ret;
}
+EXPORT_SYMBOL_GPL(unregister_ftrace_function);
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
--
1.7.2.3
* [RFC][PATCH 12/13] ftrace: Modify ftrace_set_filter/notrace to take ops
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
` (10 preceding siblings ...)
2011-05-06 15:26 ` [RFC][PATCH 11/13] ftrace: Allow dynamically allocated function tracers Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-06 15:26 ` [RFC][PATCH 13/13] ftrace: Add self-tests for multiple function trace users Steven Rostedt
2011-05-10 8:00 ` [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Ingo Molnar
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
[-- Attachment #1: 0012-ftrace-Modify-ftrace_set_filter-notrace-to-take-ops.patch --]
[-- Type: text/plain, Size: 5728 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
Since users of the function tracer can now pick and choose which
functions they want to trace, independently of other users of the
function tracer, the ops struct must be passed to the
ftrace_set_filter() family of functions.
The functions ftrace_set_global_filter() and ftrace_set_global_notrace()
are added to preserve the old filter interface, which is still used
to modify the generic (global) function tracers.
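A hedged usage sketch (my_ops, my_callback and the "kfree" filter below are
illustrative, not taken from the patch):

static void my_callback(unsigned long ip, unsigned long parent_ip)
{
        /* called only for the functions my_ops filters on */
}

static struct ftrace_ops my_ops = {
        .func = my_callback,
};

static void example_setup(void)
{
        char *name = "kfree";

        /* the filter applies to my_ops only; other users are untouched */
        ftrace_set_filter(&my_ops, name, strlen(name), 1);
        register_ftrace_function(&my_ops);

        /* the old interface still acts on the global (generic) tracers */
        ftrace_set_global_filter(NULL, 0, 1);
}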
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
include/linux/ftrace.h | 7 +++++-
kernel/trace/ftrace.c | 46 +++++++++++++++++++++++++++++++++++++++-
kernel/trace/trace_selftest.c | 7 ++++-
3 files changed, 55 insertions(+), 5 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index caba694..9d88e1c 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -179,7 +179,12 @@ struct dyn_ftrace {
};
int ftrace_force_update(void);
-void ftrace_set_filter(unsigned char *buf, int len, int reset);
+void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
+void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
+void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
+void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5b3ee04..d017c2c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2826,6 +2826,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
struct ftrace_hash *hash;
int ret;
+ /* All global ops uses the global ops filters */
+ if (ops->flags & FTRACE_OPS_FL_GLOBAL)
+ ops = &global_ops;
+
if (unlikely(ftrace_disabled))
return -ENODEV;
@@ -2856,6 +2860,41 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
/**
* ftrace_set_filter - set a function to filter on in ftrace
+ * @ops - the ops to set the filter with
+ * @buf - the string that holds the function filter text.
+ * @len - the length of the string.
+ * @reset - non zero to reset all filters before applying this filter.
+ *
+ * Filters denote which functions should be enabled when tracing is enabled.
+ * If @buf is NULL and reset is set, all functions will be enabled for tracing.
+ */
+void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset)
+{
+ ftrace_set_regex(ops, buf, len, reset, 1);
+}
+EXPORT_SYMBOL_GPL(ftrace_set_filter);
+
+/**
+ * ftrace_set_notrace - set a function to not trace in ftrace
+ * @ops - the ops to set the notrace filter with
+ * @buf - the string that holds the function notrace text.
+ * @len - the length of the string.
+ * @reset - non zero to reset all filters before applying this filter.
+ *
+ * Notrace Filters denote which functions should not be enabled when tracing
+ * is enabled. If @buf is NULL and reset is set, all functions will be enabled
+ * for tracing.
+ */
+void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset)
+{
+ ftrace_set_regex(ops, buf, len, reset, 0);
+}
+EXPORT_SYMBOL_GPL(ftrace_set_notrace);
+/**
+ * ftrace_set_filter - set a function to filter on in ftrace
+ * @ops - the ops to set the filter with
* @buf - the string that holds the function filter text.
* @len - the length of the string.
* @reset - non zero to reset all filters before applying this filter.
@@ -2863,13 +2902,15 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
* Filters denote which functions should be enabled when tracing is enabled.
* If @buf is NULL and reset is set, all functions will be enabled for tracing.
*/
-void ftrace_set_filter(unsigned char *buf, int len, int reset)
+void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
+EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
/**
* ftrace_set_notrace - set a function to not trace in ftrace
+ * @ops - the ops to set the notrace filter with
* @buf - the string that holds the function notrace text.
* @len - the length of the string.
* @reset - non zero to reset all filters before applying this filter.
@@ -2878,10 +2919,11 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset)
* is enabled. If @buf is NULL and reset is set, all functions will be enabled
* for tracing.
*/
-void ftrace_set_notrace(unsigned char *buf, int len, int reset)
+void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
+EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
/*
* command line interface to allow users to set filters on boot up.
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 659732e..0d563a9 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -131,7 +131,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
/* filter only on our function */
- ftrace_set_filter(func_name, strlen(func_name), 1);
+ ftrace_set_global_filter(func_name, strlen(func_name), 1);
/* enable tracing */
ret = tracer_init(trace, tr);
@@ -181,7 +181,10 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
tracer_enabled = save_tracer_enabled;
/* Enable tracing on all functions again */
- ftrace_set_filter(NULL, 0, 1);
+ ftrace_set_global_filter(NULL, 0, 1);
+
+ if (!ret)
+ ret = trace_selftest_probes();
return ret;
}
--
1.7.2.3
* [RFC][PATCH 13/13] ftrace: Add self-tests for multiple function trace users
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
` (11 preceding siblings ...)
2011-05-06 15:26 ` [RFC][PATCH 12/13] ftrace: Modify ftrace_set_filter/notrace to take ops Steven Rostedt
@ 2011-05-06 15:26 ` Steven Rostedt
2011-05-10 8:00 ` [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Ingo Molnar
13 siblings, 0 replies; 17+ messages in thread
From: Steven Rostedt @ 2011-05-06 15:26 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
[-- Attachment #1: 0013-ftrace-Add-self-tests-for-multiple-function-trace-us.patch --]
[-- Type: text/plain, Size: 7553 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
Add some basic sanity tests for multiple users of the function
tracer at startup.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/trace.h | 2 +
kernel/trace/trace_selftest.c | 209 ++++++++++++++++++++++++++++++++-
kernel/trace/trace_selftest_dynamic.c | 6 +
3 files changed, 215 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5e9dfc6..6b69c4b 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -419,6 +419,8 @@ extern void trace_find_cmdline(int pid, char comm[]);
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
+#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
+extern int DYN_FTRACE_TEST_NAME2(void);
#endif
extern int ring_buffer_expanded;
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 0d563a9..288541f 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -101,6 +101,206 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
#ifdef CONFIG_DYNAMIC_FTRACE
+static int trace_selftest_test_probe1_cnt;
+static void trace_selftest_test_probe1_func(unsigned long ip,
+ unsigned long pip)
+{
+ trace_selftest_test_probe1_cnt++;
+}
+
+static int trace_selftest_test_probe2_cnt;
+static void trace_selftest_test_probe2_func(unsigned long ip,
+ unsigned long pip)
+{
+ trace_selftest_test_probe2_cnt++;
+}
+
+static int trace_selftest_test_probe3_cnt;
+static void trace_selftest_test_probe3_func(unsigned long ip,
+ unsigned long pip)
+{
+ trace_selftest_test_probe3_cnt++;
+}
+
+static int trace_selftest_test_global_cnt;
+static void trace_selftest_test_global_func(unsigned long ip,
+ unsigned long pip)
+{
+ trace_selftest_test_global_cnt++;
+}
+
+static int trace_selftest_test_dyn_cnt;
+static void trace_selftest_test_dyn_func(unsigned long ip,
+ unsigned long pip)
+{
+ trace_selftest_test_dyn_cnt++;
+}
+
+static struct ftrace_ops test_probe1 = {
+ .func = trace_selftest_test_probe1_func,
+};
+
+static struct ftrace_ops test_probe2 = {
+ .func = trace_selftest_test_probe2_func,
+};
+
+static struct ftrace_ops test_probe3 = {
+ .func = trace_selftest_test_probe3_func,
+};
+
+static struct ftrace_ops test_global = {
+ .func = trace_selftest_test_global_func,
+ .flags = FTRACE_OPS_FL_GLOBAL,
+};
+
+static void print_counts(void)
+{
+ printk("(%d %d %d %d %d) ",
+ trace_selftest_test_probe1_cnt,
+ trace_selftest_test_probe2_cnt,
+ trace_selftest_test_probe3_cnt,
+ trace_selftest_test_global_cnt,
+ trace_selftest_test_dyn_cnt);
+}
+
+static void reset_counts(void)
+{
+ trace_selftest_test_probe1_cnt = 0;
+ trace_selftest_test_probe2_cnt = 0;
+ trace_selftest_test_probe3_cnt = 0;
+ trace_selftest_test_global_cnt = 0;
+ trace_selftest_test_dyn_cnt = 0;
+}
+
+static int trace_selftest_ops(int cnt)
+{
+ int save_ftrace_enabled = ftrace_enabled;
+ struct ftrace_ops *dyn_ops;
+ char *func1_name;
+ char *func2_name;
+ int len1;
+ int len2;
+ int ret = -1;
+
+ printk(KERN_CONT "PASSED\n");
+ pr_info("Testing dynamic ftrace ops #%d: ", cnt);
+
+ ftrace_enabled = 1;
+ reset_counts();
+
+ /* Handle PPC64 '.' name */
+ func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
+ func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
+ len1 = strlen(func1_name);
+ len2 = strlen(func2_name);
+
+ /*
+ * Probe 1 will trace function 1.
+ * Probe 2 will trace function 2.
+ * Probe 3 will trace functions 1 and 2.
+ */
+ ftrace_set_filter(&test_probe1, func1_name, len1, 1);
+ ftrace_set_filter(&test_probe2, func2_name, len2, 1);
+ ftrace_set_filter(&test_probe3, func1_name, len1, 1);
+ ftrace_set_filter(&test_probe3, func2_name, len2, 0);
+
+ register_ftrace_function(&test_probe1);
+ register_ftrace_function(&test_probe2);
+ register_ftrace_function(&test_probe3);
+ register_ftrace_function(&test_global);
+
+ DYN_FTRACE_TEST_NAME();
+
+ print_counts();
+
+ if (trace_selftest_test_probe1_cnt != 1)
+ goto out;
+ if (trace_selftest_test_probe2_cnt != 0)
+ goto out;
+ if (trace_selftest_test_probe3_cnt != 1)
+ goto out;
+ if (trace_selftest_test_global_cnt == 0)
+ goto out;
+
+ DYN_FTRACE_TEST_NAME2();
+
+ print_counts();
+
+ if (trace_selftest_test_probe1_cnt != 1)
+ goto out;
+ if (trace_selftest_test_probe2_cnt != 1)
+ goto out;
+ if (trace_selftest_test_probe3_cnt != 2)
+ goto out;
+
+ /* Add a dynamic probe */
+ dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
+ if (!dyn_ops) {
+ printk("MEMORY ERROR ");
+ goto out;
+ }
+
+ dyn_ops->func = trace_selftest_test_dyn_func;
+
+ register_ftrace_function(dyn_ops);
+
+ trace_selftest_test_global_cnt = 0;
+
+ DYN_FTRACE_TEST_NAME();
+
+ print_counts();
+
+ if (trace_selftest_test_probe1_cnt != 2)
+ goto out_free;
+ if (trace_selftest_test_probe2_cnt != 1)
+ goto out_free;
+ if (trace_selftest_test_probe3_cnt != 3)
+ goto out_free;
+ if (trace_selftest_test_global_cnt == 0)
+ goto out;
+ if (trace_selftest_test_dyn_cnt == 0)
+ goto out_free;
+
+ DYN_FTRACE_TEST_NAME2();
+
+ print_counts();
+
+ if (trace_selftest_test_probe1_cnt != 2)
+ goto out_free;
+ if (trace_selftest_test_probe2_cnt != 2)
+ goto out_free;
+ if (trace_selftest_test_probe3_cnt != 4)
+ goto out_free;
+
+ ret = 0;
+ out_free:
+ unregister_ftrace_function(dyn_ops);
+ kfree(dyn_ops);
+
+ out:
+ /* Purposely unregister in the same order */
+ unregister_ftrace_function(&test_probe1);
+ unregister_ftrace_function(&test_probe2);
+ unregister_ftrace_function(&test_probe3);
+ unregister_ftrace_function(&test_global);
+
+ /* Make sure everything is off */
+ reset_counts();
+ DYN_FTRACE_TEST_NAME();
+ DYN_FTRACE_TEST_NAME();
+
+ if (trace_selftest_test_probe1_cnt ||
+ trace_selftest_test_probe2_cnt ||
+ trace_selftest_test_probe3_cnt ||
+ trace_selftest_test_global_cnt ||
+ trace_selftest_test_dyn_cnt)
+ ret = -1;
+
+ ftrace_enabled = save_ftrace_enabled;
+
+ return ret;
+}
+
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
struct trace_array *tr,
@@ -166,16 +366,20 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
/* check the trace buffer */
ret = trace_test_buffer(tr, &count);
- trace->reset(tr);
tracing_start();
/* we should only have one item */
if (!ret && count != 1) {
+ trace->reset(tr);
printk(KERN_CONT ".. filter failed count=%ld ..", count);
ret = -1;
goto out;
}
+ /* Test the ops with global tracing running */
+ ret = trace_selftest_ops(1);
+ trace->reset(tr);
+
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
@@ -183,8 +387,9 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
/* Enable tracing on all functions again */
ftrace_set_global_filter(NULL, 0, 1);
+ /* Test the ops with global tracing off */
if (!ret)
- ret = trace_selftest_probes();
+ ret = trace_selftest_ops(2);
return ret;
}
diff --git a/kernel/trace/trace_selftest_dynamic.c b/kernel/trace/trace_selftest_dynamic.c
index 54dd77c..b4c475a 100644
--- a/kernel/trace/trace_selftest_dynamic.c
+++ b/kernel/trace/trace_selftest_dynamic.c
@@ -5,3 +5,9 @@ int DYN_FTRACE_TEST_NAME(void)
/* used to call mcount */
return 0;
}
+
+int DYN_FTRACE_TEST_NAME2(void)
+{
+ /* used to call mcount */
+ return 0;
+}
--
1.7.2.3
* Re: [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace
2011-05-06 15:26 [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Steven Rostedt
` (12 preceding siblings ...)
2011-05-06 15:26 ` [RFC][PATCH 13/13] ftrace: Add self-tests for multiple function trace users Steven Rostedt
@ 2011-05-10 8:00 ` Ingo Molnar
2011-05-10 8:32 ` Steven Rostedt
13 siblings, 1 reply; 17+ messages in thread
From: Ingo Molnar @ 2011-05-10 8:00 UTC (permalink / raw)
To: Steven Rostedt
Cc: linux-kernel, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
* Steven Rostedt <rostedt@goodmis.org> wrote:
>
> Ingo, et. al.
>
> This is an RFC patch set of the work I did to allow multiple users
> to pick and choose which functions they would like to trace without
> being affected by other users (well, they are still affected, but
> they can choose their own functions).
>
> Since this rewrote a lot of the guts of ftrace's function handling
> I broke it up into steps that I can sequentially think about how to
> make the conversion. It also helped in understanding the changes that
> had to be made.
>
> The end result now has this:
>
> o The function record list flags item now stores a ref count, and
> we no longer need to have the flags if the records are filtered
> or set for "notrace"
>
> o Each ftrace_ops has its own set of functions to trace or not trace
> which is used to update the ref counts of the function records
> mentioned above. If the ftrace_ops is not currently active, the
> records are not touched when the filters are changed. If the filters
> are changed while the ftrace_ops is active, it is done in a way
> with RCU to make the updates.
>
> o If only one ftrace_ops is registered for tracing (actively tracing),
> its handler is called directly from mcount. Unless the ftrace_ops
> was allocated and not a static/global core kernel variable (see below).
>
> o If more than one ftrace_ops is registered, then a function is called
> that will iterate over the list of registered ftrace_ops, performing
> a check of each of its hashes to know if ftrace_ops handler should
> be called for that function.
>
> o If a ftrace_ops is allocated and not a static/global core kernel
> variable, it will be forced to use the function that loops through
> the ftrace_ops even if it's the only one. This is because that function
> disables preemption when it performs the loop. This is needed, because
> dynamically allocated ftrace_ops must call synchronize_sched() before
> being freed (which is done when the dynamic ftrace_ops has been
> unregistered).
>
> o A file has been created in the debugfs/tracing directory called
> enabled_functions. This file contains a list of all functions that
> any ftrace_ops has registered, along with each function's ref count.
> This can be used to make sure nothing is tracing.
Well, i think we'd like to see the end result as well: i.e. function tracing
exposed to a tracer that can do per task tracing (perf) and then demonstrating
that independent tracing sessions can trace functions without impacting each
other.
Thanks,
Ingo
* Re: [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace
2011-05-10 8:00 ` [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace Ingo Molnar
@ 2011-05-10 8:32 ` Steven Rostedt
2011-05-10 14:29 ` Frederic Weisbecker
0 siblings, 1 reply; 17+ messages in thread
From: Steven Rostedt @ 2011-05-10 8:32 UTC (permalink / raw)
To: Ingo Molnar
Cc: linux-kernel, Andrew Morton, Thomas Gleixner, Frederic Weisbecker,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
On Tue, 2011-05-10 at 10:00 +0200, Ingo Molnar wrote:
> * Steven Rostedt <rostedt@goodmis.org> wrote:
>
> Well, i think we'd like to see the end result as well: i.e. function tracing
> exposed to a tracer that can do per task tracing (perf) and then demonstrating
> that independent tracing sessions can trace functions without impacting each
> other.
This could be done. I just wanted to post this RFC for now, just in case
someone found an issue with this design. When I get back home, I'll
complete this, and work with Frederic to get an RFC patch set with perf
working.
I don't want to rush this out as it can get complex and I want us to
"learn" the correct way of enabling this for perf. Although it would be
nice to get this into 2.6.40, perhaps we should be conservative and aim
for 2.6.41 and let it settle for a release.
-- Steve
* Re: [RFC][PATCH 00/13] ftrace: Allow multiple users to pick and choose functions to trace
2011-05-10 8:32 ` Steven Rostedt
@ 2011-05-10 14:29 ` Frederic Weisbecker
0 siblings, 0 replies; 17+ messages in thread
From: Frederic Weisbecker @ 2011-05-10 14:29 UTC (permalink / raw)
To: Steven Rostedt
Cc: Ingo Molnar, linux-kernel, Andrew Morton, Thomas Gleixner,
Mathieu Desnoyers, Lai Jiangshan, Li Zefan, Jiri Olsa,
David Sharp, Vaibhav Nagarnaik, Michael Rubin
On Tue, May 10, 2011 at 04:32:18AM -0400, Steven Rostedt wrote:
> On Tue, 2011-05-10 at 10:00 +0200, Ingo Molnar wrote:
> > * Steven Rostedt <rostedt@goodmis.org> wrote:
> >
>
> > Well, i think we'd like to see the end result as well: i.e. function tracing
> > exposed to a tracer that can do per task tracing (perf) and then demonstrating
> > that independent tracing sessions can trace functions without impacting each
> > other.
>
> This could be done. I just wanted to post this RFC for now, just in case
> someone found an issue with this design. When I get back home, I'll
> complete this, and work with Frederic to get a RFC patch set with perf
> working.
>
> I don't want to rush this out as it can get complex and I want us to
> "learn" the correct way of enabling this for perf. Although, it would be
> nice to get this into 2.6.40. Perhaps we should be conservative and aim
> for 2.6.41 and let it settle for a release.
In fact, getting the function tracer able to run with multiple users and
making it usable by perf are two different (although serving the same final
purpose) and complicated tasks in their own right.
Thus it seems more reasonable to me to advance step by step, as we are doing.