* [PATCH 0/6] [GIT PULL v2] ring-buffer: speed ups
@ 2009-05-04 15:19 Steven Rostedt
2009-05-04 15:19 ` [PATCH 1/6] ring-buffer: add counters for commit overrun and nmi dropped entries Steven Rostedt
` (5 more replies)
0 siblings, 6 replies; 7+ messages in thread
From: Steven Rostedt @ 2009-05-04 15:19 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Frederic Weisbecker
Ingo,
Please pull the latest tip/tracing/ftrace-1 tree, which can be found at:
git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace.git
tip/tracing/ftrace-1
Steven Rostedt (6):
ring-buffer: add counters for commit overrun and nmi dropped entries
tracing: export stats of ring buffers to userspace
ring-buffer: convert cpu buffer entries to local_t
ring-buffer: record page entries in buffer page descriptor
ring-buffer: have read page swap increment counter with page entries
ring-buffer: disable writers when resetting buffers
----
include/linux/ring_buffer.h | 2 +
kernel/trace/ring_buffer.c | 141 ++++++++++++++++++++++++-------------------
kernel/trace/trace.c | 42 +++++++++++++
3 files changed, 123 insertions(+), 62 deletions(-)
--
* [PATCH 1/6] ring-buffer: add counters for commit overrun and nmi dropped entries
2009-05-04 15:19 [PATCH 0/6] [GIT PULL v2] ring-buffer: speed ups Steven Rostedt
@ 2009-05-04 15:19 ` Steven Rostedt
2009-05-04 15:19 ` [PATCH 2/6] tracing: export stats of ring buffers to userspace Steven Rostedt
` (4 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Steven Rostedt @ 2009-05-04 15:19 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Frederic Weisbecker
[-- Attachment #1: 0001-ring-buffer-add-counters-for-commit-overrun-and-nmi.patch --]
[-- Type: text/plain, Size: 4485 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
The WARN_ON in the ring buffer that fires when a commit is preempted and the
buffer is filled by the preceding writes can trigger during normal operation.
The WARN_ON makes it look like a bug. Worse, it does not stop tracing and it
calls printk, which can itself be traced and recurse, so it is prone to
deadlock (the WARN_ON is not in a position where recursion is safe).
This patch removes the WARN_ON and replaces it with a counter that
can be retrieved by a tracer. This counter is called commit_overrun.
While at it, I added an nmi_dropped counter to count every time an NMI entry
is dropped because the NMI could not take the spinlock.
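For illustration, a tracer could dump the new counters with something like
the following (just a minimal kernel-side sketch using the accessors added
below; the helper name and pr_info output are hypothetical, not part of
this patch):

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/ring_buffer.h>

/* Hypothetical helper: print the drop counters for every online CPU. */
static void report_ring_buffer_drops(struct ring_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu)
		pr_info("cpu%d: commit overrun: %lu  nmi dropped: %lu\n",
			cpu,
			ring_buffer_commit_overrun_cpu(buffer, cpu),
			ring_buffer_nmi_dropped_cpu(buffer, cpu));
}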
[ Impact: prevent deadlock by not printing a warning for a normal case ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
include/linux/ring_buffer.h | 2 +
kernel/trace/ring_buffer.c | 52 ++++++++++++++++++++++++++++++++++++++++--
2 files changed, 51 insertions(+), 3 deletions(-)
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 1c2f809..f134582 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -153,6 +153,8 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer);
unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu);
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f4cc590..dc8b2ab 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -402,6 +402,8 @@ struct ring_buffer_per_cpu {
struct buffer_page *tail_page; /* write to tail */
struct buffer_page *commit_page; /* committed pages */
struct buffer_page *reader_page;
+ unsigned long nmi_dropped;
+ unsigned long commit_overrun;
unsigned long overrun;
unsigned long entries;
u64 write_stamp;
@@ -1216,8 +1218,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
* simply fail.
*/
if (unlikely(in_nmi())) {
- if (!__raw_spin_trylock(&cpu_buffer->lock))
+ if (!__raw_spin_trylock(&cpu_buffer->lock)) {
+ cpu_buffer->nmi_dropped++;
goto out_reset;
+ }
} else
__raw_spin_lock(&cpu_buffer->lock);
@@ -1238,8 +1242,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
* about it.
*/
if (unlikely(next_page == commit_page)) {
- /* This can easily happen on small ring buffers */
- WARN_ON_ONCE(buffer->pages > 2);
+ cpu_buffer->commit_overrun++;
goto out_reset;
}
@@ -1926,6 +1929,47 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
/**
+ * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of dropped NMI entries from
+ */
+unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long ret;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return 0;
+
+ cpu_buffer = buffer->buffers[cpu];
+ ret = cpu_buffer->nmi_dropped;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
+
+/**
+ * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of overruns from
+ */
+unsigned long
+ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long ret;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return 0;
+
+ cpu_buffer = buffer->buffers[cpu];
+ ret = cpu_buffer->commit_overrun;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
+
+/**
* ring_buffer_entries - get the number of entries in a buffer
* @buffer: The ring buffer
*
@@ -2595,6 +2639,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->read = 0;
+ cpu_buffer->nmi_dropped = 0;
+ cpu_buffer->commit_overrun = 0;
cpu_buffer->overrun = 0;
cpu_buffer->entries = 0;
--
1.6.2.4
--
* [PATCH 2/6] tracing: export stats of ring buffers to userspace
2009-05-04 15:19 [PATCH 0/6] [GIT PULL v2] ring-buffer: speed ups Steven Rostedt
2009-05-04 15:19 ` [PATCH 1/6] ring-buffer: add counters for commit overrun and nmi dropped entries Steven Rostedt
@ 2009-05-04 15:19 ` Steven Rostedt
2009-05-04 15:19 ` [PATCH 3/6] ring-buffer: convert cpu buffer entries to local_t Steven Rostedt
` (3 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Steven Rostedt @ 2009-05-04 15:19 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Frederic Weisbecker
[-- Attachment #1: 0002-tracing-export-stats-of-ring-buffers-to-userspace.patch --]
[-- Type: text/plain, Size: 2628 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
This patch adds stats to the ftrace ring buffers:
# cat /debugfs/tracing/per_cpu/cpu0/stats
entries: 42360
overrun: 30509326
commit overrun: 0
nmi dropped: 0
Here, entries is the total number of data entries in the buffer.
overrun is the number of entries that were not consumed and were overwritten
by the writer.
commit overrun is the number of entries dropped due to nested writers
wrapping the buffer before the initial writer finished the commit.
nmi dropped is the number of entries dropped because the ring buffer lock
was held when an NMI tried to write to the ring buffer.
Note, this field will be meaningless and will go away when the ring
buffer becomes lockless.
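For completeness, here is a tiny userspace sketch that reads the new file
(not part of this patch; it assumes debugfs is mounted at /sys/kernel/debug,
so adjust the path for your mount point):

#include <stdio.h>

int main(void)
{
	/* Path assumes debugfs mounted at /sys/kernel/debug */
	FILE *f = fopen("/sys/kernel/debug/tracing/per_cpu/cpu0/stats", "r");
	char line[128];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* entries, overrun, commit overrun, nmi dropped */
	fclose(f);
	return 0;
}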
[ Impact: let userspace know what is happening in the ring buffers ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/trace.c | 42 ++++++++++++++++++++++++++++++++++++++++++
1 files changed, 42 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f5427e0..74df029 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3595,6 +3595,45 @@ static const struct file_operations tracing_buffers_fops = {
.llseek = no_llseek,
};
+static ssize_t
+tracing_stats_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long cpu = (unsigned long)filp->private_data;
+ struct trace_array *tr = &global_trace;
+ struct trace_seq *s;
+ unsigned long cnt;
+
+ s = kmalloc(sizeof(*s), GFP_ATOMIC);
+ if (!s)
+ return -ENOMEM;
+
+ trace_seq_init(s);
+
+ cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
+ trace_seq_printf(s, "entries: %ld\n", cnt);
+
+ cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
+ trace_seq_printf(s, "overrun: %ld\n", cnt);
+
+ cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
+ trace_seq_printf(s, "commit overrun: %ld\n", cnt);
+
+ cnt = ring_buffer_nmi_dropped_cpu(tr->buffer, cpu);
+ trace_seq_printf(s, "nmi dropped: %ld\n", cnt);
+
+ count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
+
+ kfree(s);
+
+ return count;
+}
+
+static const struct file_operations tracing_stats_fops = {
+ .open = tracing_open_generic,
+ .read = tracing_stats_read,
+};
+
#ifdef CONFIG_DYNAMIC_FTRACE
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
@@ -3708,6 +3747,9 @@ static void tracing_init_debugfs_percpu(long cpu)
trace_create_file("trace_pipe_raw", 0444, d_cpu,
(void *) cpu, &tracing_buffers_fops);
+
+ trace_create_file("stats", 0444, d_cpu,
+ (void *) cpu, &tracing_stats_fops);
}
#ifdef CONFIG_FTRACE_SELFTEST
--
1.6.2.4
--
* [PATCH 3/6] ring-buffer: convert cpu buffer entries to local_t
2009-05-04 15:19 [PATCH 0/6] [GIT PULL v2] ring-buffer: speed ups Steven Rostedt
2009-05-04 15:19 ` [PATCH 1/6] ring-buffer: add counters for commit overrun and nmi dropped entries Steven Rostedt
2009-05-04 15:19 ` [PATCH 2/6] tracing: export stats of ring buffers to userspace Steven Rostedt
@ 2009-05-04 15:19 ` Steven Rostedt
2009-05-04 15:19 ` [PATCH 4/6] ring-buffer: record page entries in buffer page descriptor Steven Rostedt
` (2 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Steven Rostedt @ 2009-05-04 15:19 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Frederic Weisbecker
[-- Attachment #1: 0003-ring-buffer-convert-cpu-buffer-entries-to-local_t.patch --]
[-- Type: text/plain, Size: 4161 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
The entries counter in cpu buffer is not atomic. It can be updated by
other interrupts or from another CPU (readers).
But making entries an "atomic_t" would turn every update into a full atomic
operation, which can hurt performance. Instead we convert it to a local_t,
which increments the counter with a CPU-local atomic operation (if the arch
supports it).
Instead of fighting with readers and overwrites that decrement the counter,
I added a "read" counter. Every time a reader reads an entry, the read
counter is incremented.
We already have an overrun counter, and with that, the entries counter and
the read counter, we can calculate the number of entries currently in the
buffer with:
(entries - overrun) - read
As long as the total number of entries written to the ring buffer is less
than what a word can hold, this will work. But since the entries counter was
previously an unsigned long, this is no different from what we had before.
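As a worked example with made-up numbers (a standalone sketch, not kernel
code): say the writer committed 1000 entries, 200 of them were overwritten
before being read, and the reader consumed 300; then 500 entries remain in
the buffer:

#include <stdio.h>

int main(void)
{
	/* Hypothetical counter values, for illustration only */
	unsigned long entries = 1000;	/* total entries written         */
	unsigned long overrun = 200;	/* overwritten before being read */
	unsigned long read    = 300;	/* consumed by the reader        */

	/* Entries still sitting in the buffer */
	printf("%lu\n", (entries - overrun) - read);	/* prints 500 */
	return 0;
}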
Thanks to Andrew Morton for pointing out in the first version that atomic_t
does not replace unsigned long. The counter is now a local_t (which wraps an
atomic_long_t), so it is signed, but a negative count is most likely a bug
anyway.
[ Impact: keep accurate count of cpu buffer entries ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/ring_buffer.c | 20 +++++++++++---------
1 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index dc8b2ab..f2d56e9 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -405,7 +405,8 @@ struct ring_buffer_per_cpu {
unsigned long nmi_dropped;
unsigned long commit_overrun;
unsigned long overrun;
- unsigned long entries;
+ unsigned long read;
+ local_t entries;
u64 write_stamp;
u64 read_stamp;
atomic_t record_disabled;
@@ -997,7 +998,6 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
continue;
cpu_buffer->overrun++;
- cpu_buffer->entries--;
}
}
@@ -1588,7 +1588,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
- cpu_buffer->entries++;
+ local_inc(&cpu_buffer->entries);
/* Only process further if we own the commit */
if (!rb_is_commit(cpu_buffer, event))
@@ -1722,7 +1722,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
* The commit is still visible by the reader, so we
* must increment entries.
*/
- cpu_buffer->entries++;
+ local_inc(&cpu_buffer->entries);
out:
/*
* If a write came in and pushed the tail page
@@ -1902,7 +1902,8 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
- ret = cpu_buffer->entries;
+ ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
+ - cpu_buffer->read;
return ret;
}
@@ -1985,7 +1986,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
- entries += cpu_buffer->entries;
+ entries += local_read(&cpu_buffer->entries);
}
return entries;
@@ -2225,7 +2226,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
|| rb_discarded_event(event))
- cpu_buffer->entries--;
+ cpu_buffer->read++;
rb_update_read_stamp(cpu_buffer, event);
@@ -2642,7 +2643,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->nmi_dropped = 0;
cpu_buffer->commit_overrun = 0;
cpu_buffer->overrun = 0;
- cpu_buffer->entries = 0;
+ cpu_buffer->read = 0;
+ local_set(&cpu_buffer->entries, 0);
cpu_buffer->write_stamp = 0;
cpu_buffer->read_stamp = 0;
@@ -2813,7 +2815,7 @@ static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
/* Only count data entries */
if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
continue;
- cpu_buffer->entries--;
+ cpu_buffer->read++;
}
__raw_spin_unlock(&cpu_buffer->lock);
}
--
1.6.2.4
--
* [PATCH 4/6] ring-buffer: record page entries in buffer page descriptor
2009-05-04 15:19 [PATCH 0/6] [GIT PULL v2] ring-buffer: speed ups Steven Rostedt
` (2 preceding siblings ...)
2009-05-04 15:19 ` [PATCH 3/6] ring-buffer: convert cpu buffer entries to local_t Steven Rostedt
@ 2009-05-04 15:19 ` Steven Rostedt
2009-05-04 15:19 ` [PATCH 5/6] ring-buffer: have read page swap increment counter with page entries Steven Rostedt
2009-05-04 15:19 ` [PATCH 6/6] ring-buffer: disable writers when resetting buffers Steven Rostedt
5 siblings, 0 replies; 7+ messages in thread
From: Steven Rostedt @ 2009-05-04 15:19 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Frederic Weisbecker
[-- Attachment #1: 0004-ring-buffer-record-page-entries-in-buffer-page-desc.patch --]
[-- Type: text/plain, Size: 4653 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
Currently, when the ring buffer writer overflows the buffer and must
write over unconsumed data, we increment the overrun counter by walking
the entries on the page we are about to overwrite, one by one.
This is not very efficient. This patch adds another entry counter
to each buffer page descriptor that keeps track of the number of
entries on the page. Now, on overwrite, the overrun counter simply
adds the number of entries on the page that is about to be overwritten.
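To show the bookkeeping in isolation, here is a toy userspace model (not the
ring buffer code; page_desc, write_event and overwrite_page are made-up
names): each page carries its own entry count, so discarding a page costs a
single addition instead of a walk over its events:

#include <stdio.h>

struct page_desc {
	unsigned long entries;	/* data entries on this page */
};

static unsigned long overrun;

/* writer side: count the entry on the page it was written to */
static void write_event(struct page_desc *p)
{
	p->entries++;
}

/* overwrite side: account for the whole page in O(1) */
static void overwrite_page(struct page_desc *p)
{
	overrun += p->entries;	/* was: walk every event on the page */
	p->entries = 0;		/* page is recycled for new writes */
}

int main(void)
{
	struct page_desc p = { 0 };
	int i;

	for (i = 0; i < 5; i++)
		write_event(&p);
	overwrite_page(&p);
	printf("overrun: %lu\n", overrun);	/* prints: overrun: 5 */
	return 0;
}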
[ Impact: speed up of ring buffer in overwrite mode ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/ring_buffer.c | 39 +++++++++++++--------------------------
1 files changed, 13 insertions(+), 26 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f2d56e9..18e87b5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -321,9 +321,10 @@ struct buffer_data_page {
};
struct buffer_page {
+ struct list_head list; /* list of buffer pages */
local_t write; /* index for next write */
unsigned read; /* index for next read */
- struct list_head list; /* list of free pages */
+ local_t entries; /* entries on this page */
struct buffer_data_page *page; /* Actual data page */
};
@@ -977,30 +978,6 @@ static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
return rb_page_commit(cpu_buffer->head_page);
}
-/*
- * When the tail hits the head and the buffer is in overwrite mode,
- * the head jumps to the next page and all content on the previous
- * page is discarded. But before doing so, we update the overrun
- * variable of the buffer.
- */
-static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
-{
- struct ring_buffer_event *event;
- unsigned long head;
-
- for (head = 0; head < rb_head_size(cpu_buffer);
- head += rb_event_length(event)) {
-
- event = __rb_page_index(cpu_buffer->head_page, head);
- if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
- return;
- /* Only count data entries */
- if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
- continue;
- cpu_buffer->overrun++;
- }
-}
-
static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page **bpage)
{
@@ -1253,7 +1230,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
/* tail_page has not moved yet? */
if (tail_page == cpu_buffer->tail_page) {
/* count overflows */
- rb_update_overflow(cpu_buffer);
+ cpu_buffer->overrun +=
+ local_read(&head_page->entries);
rb_inc_page(cpu_buffer, &head_page);
cpu_buffer->head_page = head_page;
@@ -1268,6 +1246,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
*/
if (tail_page == cpu_buffer->tail_page) {
local_set(&next_page->write, 0);
+ local_set(&next_page->entries, 0);
local_set(&next_page->page->commit, 0);
cpu_buffer->tail_page = next_page;
@@ -1313,6 +1292,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
event = __rb_page_index(tail_page, tail);
rb_update_event(event, type, length);
+ /* The passed in type is zero for DATA */
+ if (likely(!type))
+ local_inc(&tail_page->entries);
+
/*
* If this is a commit and the tail is zero, then update
* this page's time stamp.
@@ -2182,6 +2165,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->reader_page->list.prev = reader->list.prev;
local_set(&cpu_buffer->reader_page->write, 0);
+ local_set(&cpu_buffer->reader_page->entries, 0);
local_set(&cpu_buffer->reader_page->page->commit, 0);
/* Make the reader page now replace the head */
@@ -2628,6 +2612,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->head_page
= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
local_set(&cpu_buffer->head_page->write, 0);
+ local_set(&cpu_buffer->head_page->entries, 0);
local_set(&cpu_buffer->head_page->page->commit, 0);
cpu_buffer->head_page->read = 0;
@@ -2637,6 +2622,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
local_set(&cpu_buffer->reader_page->write, 0);
+ local_set(&cpu_buffer->reader_page->entries, 0);
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->read = 0;
@@ -2993,6 +2979,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
bpage = reader->page;
reader->page = *data_page;
local_set(&reader->write, 0);
+ local_set(&reader->entries, 0);
reader->read = 0;
*data_page = bpage;
--
1.6.2.4
--
* [PATCH 5/6] ring-buffer: have read page swap increment counter with page entries
2009-05-04 15:19 [PATCH 0/6] [GIT PULL v2] ring-buffer: speed ups Steven Rostedt
` (3 preceding siblings ...)
2009-05-04 15:19 ` [PATCH 4/6] ring-buffer: record page entries in buffer page descriptor Steven Rostedt
@ 2009-05-04 15:19 ` Steven Rostedt
2009-05-04 15:19 ` [PATCH 6/6] ring-buffer: disable writers when resetting buffers Steven Rostedt
5 siblings, 0 replies; 7+ messages in thread
From: Steven Rostedt @ 2009-05-04 15:19 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Frederic Weisbecker
[-- Attachment #1: 0005-ring-buffer-have-read-page-swap-increment-counter-w.patch --]
[-- Type: text/plain, Size: 2047 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
In the page-swap path of the ring buffer code that is used by the ftrace
splice code, we scan the page to update the counter of entries read.
Now that the number of entries is already recorded in the page descriptor,
we simply need to add it.
[ Impact: speed up reading page from ring buffer ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/ring_buffer.c | 28 +++-------------------------
1 files changed, 3 insertions(+), 25 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 18e87b5..7ff1f57 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2784,28 +2784,6 @@ out:
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
-static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
- struct buffer_data_page *bpage,
- unsigned int offset)
-{
- struct ring_buffer_event *event;
- unsigned long head;
-
- __raw_spin_lock(&cpu_buffer->lock);
- for (head = offset; head < local_read(&bpage->commit);
- head += rb_event_length(event)) {
-
- event = __rb_data_page_index(bpage, head);
- if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
- return;
- /* Only count data entries */
- if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
- continue;
- cpu_buffer->read++;
- }
- __raw_spin_unlock(&cpu_buffer->lock);
-}
-
/**
* ring_buffer_alloc_read_page - allocate a page to read from buffer
* @buffer: the buffer to allocate for.
@@ -2974,6 +2952,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
/* we copied everything to the beginning */
read = 0;
} else {
+ /* update the entry counter */
+ cpu_buffer->read += local_read(&reader->entries);
+
/* swap the pages */
rb_init_page(bpage);
bpage = reader->page;
@@ -2982,9 +2963,6 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
local_set(&reader->entries, 0);
reader->read = 0;
*data_page = bpage;
-
- /* update the entry counter */
- rb_remove_entries(cpu_buffer, bpage, read);
}
ret = read;
--
1.6.2.4
--
* [PATCH 6/6] ring-buffer: disable writers when resetting buffers
2009-05-04 15:19 [PATCH 0/6] [GIT PULL v2] ring-buffer: speed ups Steven Rostedt
` (4 preceding siblings ...)
2009-05-04 15:19 ` [PATCH 5/6] ring-buffer: have read page swap increment counter with page entries Steven Rostedt
@ 2009-05-04 15:19 ` Steven Rostedt
5 siblings, 0 replies; 7+ messages in thread
From: Steven Rostedt @ 2009-05-04 15:19 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Frederic Weisbecker
[-- Attachment #1: 0006-ring-buffer-disable-writers-when-resetting-buffers.patch --]
[-- Type: text/plain, Size: 1092 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
As a precaution, it is best to disable writing to the ring buffers
when resetting them.
[ Impact: prevent weird things if a write happens during a reset ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/ring_buffer.c | 4 ++++
1 files changed, 4 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7ff1f57..24372d1 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2649,6 +2649,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
+ atomic_inc(&cpu_buffer->record_disabled);
+
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
__raw_spin_lock(&cpu_buffer->lock);
@@ -2658,6 +2660,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
__raw_spin_unlock(&cpu_buffer->lock);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
--
1.6.2.4
--