linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] slub/slab: fix kmemleak didn't work on some case
@ 2015-06-08  5:14 Liu, XinwuX
  2015-06-08  9:38 ` Christoph Lameter
  2015-06-08 10:03 ` Catalin Marinas
  0 siblings, 2 replies; 9+ messages in thread
From: Liu, XinwuX @ 2015-06-08  5:14 UTC (permalink / raw)
  To: catalin.marinas@arm.com, cl@linux-foundation.org,
	penberg@kernel.org, mpm@selenic.com
  Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	yanmin_zhang@linux.intel.com, He, Bo, Chen, Lin Z

[-- Attachment #1: Type: text/plain, Size: 6183 bytes --]

When the kernel uses kmalloc to allocate memory, slub/slab will find
a suitable kmem_cache. Usually the cache's object size is greater
than the requested size, leaving unused space at the end of the object
that still contains stale (dirty) data. That stale data may hold values
that look like pointers to a block of leaked memory, so kmemleak will
treat the leaked block as still referenced and fail to report it when
scanning objects.

This patch fixes the problem by zeroing the unused tail of the object.

Signed-off-by: Liu, XinwuX <xinwux.liu@intel.com>
Signed-off-by: Chen Lin Z <lin.z.chen@intel.com>
---
mm/slab.c | 22 +++++++++++++++++++++-
mm/slub.c | 35 +++++++++++++++++++++++++++++++++++
2 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/mm/slab.c b/mm/slab.c
index 7eb38dd..ef25e7d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3423,6 +3423,12 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
                ret = slab_alloc(cachep, flags, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+             int delta = cachep->object_size - size;
+
+             if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+                             memset((void *)((char *)ret + size), 0, delta);
+#endif
               trace_kmalloc(_RET_IP_, ret,
                                     size, cachep->size, flags);
               return ret;
@@ -3476,11 +3482,19 @@ static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
               struct kmem_cache *cachep;
+             void *ret;
                cachep = kmalloc_slab(size, flags);
               if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                               return cachep;
-              return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+             ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+             int delta = cachep->object_size - size;
+
+             if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+                             memset((void *)((char *)ret + size), 0, delta);
+#endif
+             return ret;
}
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -3513,6 +3527,12 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
               if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                               return cachep;
               ret = slab_alloc(cachep, flags, caller);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+             int delta = cachep->object_size - size;
+
+             if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+                             memset((void *)((char *)ret + size), 0, delta);
+#endif
                trace_kmalloc(caller, ret,
                                     size, cachep->size, flags);
diff --git a/mm/slub.c b/mm/slub.c
index 54c0876..b53d9af 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2530,6 +2530,12 @@ EXPORT_SYMBOL(kmem_cache_alloc);
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
               void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+             int delta = s->object_size - size;
+
+             if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+                             memset((void *)((char *)ret + size), 0, delta);
+#endif
               trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
               kasan_kmalloc(s, ret, size);
               return ret;
@@ -2556,6 +2562,12 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
{
               void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+             int delta = s->object_size - size;
+
+             if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+                             memset((void *)((char *)ret + size), 0, delta);
+#endif
               trace_kmalloc_node(_RET_IP_, ret,
                                                  size, s->size, gfpflags, node);
@@ -3316,6 +3328,12 @@ void *__kmalloc(size_t size, gfp_t flags)
                               return s;
                ret = slab_alloc(s, flags, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+             int delta = s->object_size - size;
+
+             if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+                             memset((void *)((char *)ret + size), 0, delta);
+#endif
                trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
@@ -3361,6 +3379,12 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
                               return s;
                ret = slab_alloc_node(s, flags, node, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+             int delta = s->object_size - size;
+
+             if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+                             memset((void *)((char *)ret + size), 0, delta);
+#endif
                trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
@@ -3819,7 +3843,12 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
                               return s;
                ret = slab_alloc(s, gfpflags, caller);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+             int delta = s->object_size - size;
+             if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+                             memset((void *)((char *)ret + size), 0, delta);
+#endif
               /* Honor the call site pointer we received. */
               trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -3849,6 +3878,12 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
                               return s;
                ret = slab_alloc_node(s, gfpflags, node, caller);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+             int delta = s->object_size - size;
+
+             if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+                             memset((void *)((char *)ret + size), 0, delta);
+#endif
                /* Honor the call site pointer we received. */
               trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
--
1.9.1

[-- Attachment #2: Type: text/html, Size: 20161 bytes --]

^ permalink raw reply related	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2015-06-11  8:19 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2015-06-08  5:14 [PATCH] slub/slab: fix kmemleak didn't work on some case Liu, XinwuX
2015-06-08  9:38 ` Christoph Lameter
2015-06-08 10:13   ` Catalin Marinas
2015-06-09  8:10     ` Zhang, Yanmin
2015-06-09 15:03       ` Catalin Marinas
2015-06-10  7:45         ` Zhang, Yanmin
2015-06-10  9:48           ` Catalin Marinas
2015-06-11  8:18             ` Liu, XinwuX
2015-06-08 10:03 ` Catalin Marinas

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).