* [RFC][PATCH 1/2] tracing/slab: Move kmalloc tracepoint out of inline code
2010-11-24 21:23 [RFC][PATCH 0/2] Move kmalloc tracepoints out of inlined code Steven Rostedt
@ 2010-11-24 21:23 ` Steven Rostedt
2010-11-24 21:23 ` [RFC][PATCH 2/2] tracing/slub: " Steven Rostedt
1 sibling, 0 replies; 6+ messages in thread
From: Steven Rostedt @ 2010-11-24 21:23 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Peter Zijlstra, Frederic Weisbecker,
Christoph Lameter, Pekka Enberg, Matt Mackall, linux-mm,
Eduard - Gabriel Munteanu
[-- Attachment #1: 0001-tracing-slab-Move-kmalloc-tracepoint-out-of-inline-c.patch --]
[-- Type: text/plain, Size: 5716 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
The tracepoint for kmalloc sits in the inlined slab code, so every
kmalloc call site carries its own copy of the tracepoint.
This patch moves the tracepoint out of the inline code and into the
slab C file, which removes a large number of inlined tracepoints.
objdump -dr vmlinux.slab| grep 'jmpq.*<trace_kmalloc' |wc -l
213
objdump -dr vmlinux.slab.patched| grep 'jmpq.*<trace_kmalloc' |wc -l
1
This also has a nice impact on size.
text data bss dec hex filename
7023060 2121564 2482432 11627056 b16a30 vmlinux.slab
6970579 2109772 2482432 11562783 b06f1f vmlinux.slab.patched
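For illustration only, here is a minimal self-contained userspace sketch
of the pattern (trace_alloc(), alloc_trace() and my_kmalloc() are made-up
stand-ins, not kernel interfaces): the trace call is emitted once, inside
an out-of-line *_trace() helper, and the inline wrapper that every call
site expands merely calls that helper.
#include <stdio.h>
#include <stdlib.h>
/* stand-in for the trace_kmalloc() tracepoint */
static void trace_alloc(const void *ret, size_t size)
{
	printf("alloc %p size %zu\n", ret, size);
}
/* out-of-line helper: the trace call is emitted here, exactly once */
void *alloc_trace(size_t size)
{
	void *ret = malloc(size);

	trace_alloc(ret, size);
	return ret;
}
/* inline wrapper expanded at every call site: no trace body duplicated */
static inline void *my_kmalloc(size_t size)
{
	return alloc_trace(size);
}
int main(void)
{
	free(my_kmalloc(32));
	return 0;
}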
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
include/linux/slab_def.h | 33 +++++++++++++--------------------
mm/slab.c | 38 +++++++++++++++++++++++---------------
2 files changed, 36 insertions(+), 35 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 791a502..83203ae 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -138,11 +138,12 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern void *kmem_cache_alloc_trace(size_t size,
+ struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
{
return kmem_cache_alloc(cachep, flags);
}
@@ -179,10 +180,7 @@ found:
#endif
cachep = malloc_sizes[i].cs_cachep;
- ret = kmem_cache_alloc_notrace(cachep, flags);
-
- trace_kmalloc(_THIS_IP_, ret,
- size, slab_buffer_size(cachep), flags);
+ ret = kmem_cache_alloc_trace(size, cachep, flags);
return ret;
}
@@ -194,14 +192,16 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid);
+extern void *kmem_cache_alloc_node_trace(size_t size,
+ struct kmem_cache *cachep,
+ gfp_t flags,
+ int nodeid);
#else
static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid)
+kmem_cache_alloc_node_trace(size_t size,
+ struct kmem_cache *cachep,
+ gfp_t flags,
+ int nodeid)
{
return kmem_cache_alloc_node(cachep, flags, nodeid);
}
@@ -210,7 +210,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *cachep;
- void *ret;
if (__builtin_constant_p(size)) {
int i = 0;
@@ -234,13 +233,7 @@ found:
#endif
cachep = malloc_sizes[i].cs_cachep;
- ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
- trace_kmalloc_node(_THIS_IP_, ret,
- size, slab_buffer_size(cachep),
- flags, node);
-
- return ret;
+ return kmem_cache_alloc_node_trace(size, cachep, flags, node);
}
return __kmalloc_node(size, flags, node);
}
diff --git a/mm/slab.c b/mm/slab.c
index b1e40da..dfcc888 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3653,11 +3653,18 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
EXPORT_SYMBOL(kmem_cache_alloc);
#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
{
- return __cache_alloc(cachep, flags, __builtin_return_address(0));
+ void *ret;
+
+ ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+ trace_kmalloc(_RET_IP_, ret,
+ size, slab_buffer_size(cachep), flags);
+ return ret;
}
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif
/**
@@ -3705,31 +3712,32 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
EXPORT_SYMBOL(kmem_cache_alloc_node);
#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid)
+void *kmem_cache_alloc_node_trace(size_t size,
+ struct kmem_cache *cachep,
+ gfp_t flags,
+ int nodeid)
{
- return __cache_alloc_node(cachep, flags, nodeid,
+ void *ret;
+
+ ret = __cache_alloc_node(cachep, flags, nodeid,
__builtin_return_address(0));
+ trace_kmalloc_node(_RET_IP_, ret,
+ size, slab_buffer_size(cachep),
+ flags, nodeid);
+ return ret;
}
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif
static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
{
struct kmem_cache *cachep;
- void *ret;
cachep = kmem_find_general_cachep(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
- ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
- trace_kmalloc_node((unsigned long) caller, ret,
- size, cachep->buffer_size, flags, node);
-
- return ret;
+ return kmem_cache_alloc_node_trace(size, cachep, flags, node);
}
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
--
1.7.1
* [RFC][PATCH 2/2] tracing/slub: Move kmalloc tracepoint out of inline code
2010-11-24 21:23 [RFC][PATCH 0/2] Move kmalloc tracepoints out of inlined code Steven Rostedt
2010-11-24 21:23 ` [RFC][PATCH 1/2] tracing/slab: Move kmalloc tracepoint out of inline code Steven Rostedt
@ 2010-11-24 21:23 ` Steven Rostedt
2010-11-25 1:00 ` Li Zefan
1 sibling, 1 reply; 6+ messages in thread
From: Steven Rostedt @ 2010-11-24 21:23 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Andrew Morton, Peter Zijlstra, Frederic Weisbecker,
Christoph Lameter, Pekka Enberg, Matt Mackall, linux-mm,
Eduard - Gabriel Munteanu
[-- Attachment #1: 0002-tracing-slub-Move-kmalloc-tracepoint-out-of-inline-c.patch --]
[-- Type: text/plain, Size: 6673 bytes --]
From: Steven Rostedt <srostedt@redhat.com>
The tracepoint for kmalloc sits in the inlined slub code, so every
kmalloc call site carries its own copy of the tracepoint.
This patch moves the tracepoint out of the inline code and into the
slub C file (and into page_alloc), which removes a large number of
inlined tracepoints.
objdump -dr vmlinux.slub| grep 'jmpq.*<trace_kmalloc' |wc -l
375
objdump -dr vmlinux.slub.patched| grep 'jmpq.*<trace_kmalloc' |wc -l
2
This also has a nice impact on size.
text data bss dec hex filename
7050424 1961068 2482688 11494180 af6324 vmlinux.slub
6979599 1944620 2482688 11406907 ae0e3b vmlinux.slub.patched
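For illustration only, a similar userspace sketch of the large-allocation
path (PAGE_SIZE here, get_pages_trace() and alloc_large() are made-up
stand-ins for __get_free_pages_trace() and kmalloc_large(), not the real
kernel code): the requested size is passed down so the trace call and the
PAGE_SIZE << order accounting live in a single out-of-line helper.
#include <stdio.h>
#include <stdlib.h>
#define PAGE_SIZE 4096UL
/* stand-in for the trace_kmalloc() tracepoint */
static void trace_alloc(const void *ret, size_t req, size_t got)
{
	printf("alloc %p requested %zu allocated %zu\n", ret, req, got);
}
/* out-of-line helper standing in for __get_free_pages_trace() */
void *get_pages_trace(size_t size, unsigned int order)
{
	size_t bytes = PAGE_SIZE << order;
	void *ret = malloc(bytes);

	trace_alloc(ret, size, bytes);
	return ret;
}
/* inline caller standing in for kmalloc_large(): no trace body here */
static inline void *alloc_large(size_t size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return get_pages_trace(size, order);
}
int main(void)
{
	free(alloc_large(3 * PAGE_SIZE));
	return 0;
}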
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
include/linux/slub_def.h | 46 +++++++++++++++++++++-------------------------
mm/page_alloc.c | 14 ++++++++++++++
mm/slub.c | 27 +++++++++++++++++++--------
3 files changed, 54 insertions(+), 33 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index e4f5ed1..d390b18 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -217,30 +217,35 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+extern void *kmem_cache_alloc_trace(size_t size,
+ struct kmem_cache *s, gfp_t gfpflags);
+unsigned long __get_free_pages_trace(size_t size,
+ gfp_t gfp_mask, unsigned int order);
#else
static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *s, gfp_t gfpflags)
{
return kmem_cache_alloc(s, gfpflags);
}
+static __always_inline unsigned long
+__get_free_pages_trace(size_t size, gfp_t gfp_mask, unsigned int order)
+{
+ return __get_free_pages(gfp_mask, order);
+}
#endif
static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
unsigned int order = get_order(size);
- void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+ void *ret = (void *) __get_free_pages_trace(size, flags | __GFP_COMP, order);
kmemleak_alloc(ret, size, 1, flags);
- trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
return ret;
}
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
- void *ret;
-
if (__builtin_constant_p(size)) {
if (size > SLUB_MAX_SIZE)
return kmalloc_large(size, flags);
@@ -251,11 +256,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
if (!s)
return ZERO_SIZE_PTR;
- ret = kmem_cache_alloc_notrace(s, flags);
-
- trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
-
- return ret;
+ return kmem_cache_alloc_trace(size, s, flags);
}
}
return __kmalloc(size, flags);
@@ -266,14 +267,16 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node);
+extern void *kmem_cache_alloc_node_trace(size_t size,
+ struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node);
#else
static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node)
+kmem_cache_alloc_node_trace(size_t size,
+ struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node)
{
return kmem_cache_alloc_node(s, gfpflags, node);
}
@@ -281,8 +284,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
- void *ret;
-
if (__builtin_constant_p(size) &&
size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);
@@ -290,12 +291,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
if (!s)
return ZERO_SIZE_PTR;
- ret = kmem_cache_alloc_node_notrace(s, flags, node);
-
- trace_kmalloc_node(_THIS_IP_, ret,
- size, s->size, flags, node);
-
- return ret;
+ return kmem_cache_alloc_node_trace(size, s, flags, node);
}
return __kmalloc_node(size, flags, node);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 07a6544..c65e891 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2187,6 +2187,20 @@ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
}
EXPORT_SYMBOL(__get_free_pages);
+#ifdef CONFIG_TRACING
+unsigned long
+__get_free_pages_trace(size_t size, gfp_t gfp_mask, unsigned int order)
+{
+ unsigned long ret;
+
+ ret = __get_free_pages(gfp_mask, order);
+ trace_kmalloc(_RET_IP_, (void *)ret, size,
+ PAGE_SIZE << order, gfp_mask);
+ return ret;
+}
+EXPORT_SYMBOL(__get_free_pages_trace);
+#endif
+
unsigned long get_zeroed_page(gfp_t gfp_mask)
{
return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
diff --git a/mm/slub.c b/mm/slub.c
index 981fb73..35d0eb4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1774,11 +1774,16 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
EXPORT_SYMBOL(kmem_cache_alloc);
#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_trace(size_t size, struct kmem_cache *s, gfp_t gfpflags)
{
- return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+ void *ret;
+
+ ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+ trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+
+ return ret;
}
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif
#ifdef CONFIG_NUMA
@@ -1794,13 +1799,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
EXPORT_SYMBOL(kmem_cache_alloc_node);
#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node)
+void *kmem_cache_alloc_node_trace(size_t size,
+ struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node)
{
- return slab_alloc(s, gfpflags, node, _RET_IP_);
+ void *ret;
+
+ ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+ trace_kmalloc_node(_RET_IP_, ret,
+ size, s->size, gfpflags, node);
+ return ret;
}
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif
#endif
--
1.7.1