* [PATCH] slub/slab: fix kmemleak didn't work on some case
@ 2015-06-08 5:14 Liu, XinwuX
2015-06-08 9:38 ` Christoph Lameter
2015-06-08 10:03 ` Catalin Marinas
0 siblings, 2 replies; 9+ messages in thread
From: Liu, XinwuX @ 2015-06-08 5:14 UTC (permalink / raw)
To: catalin.marinas@arm.com, cl@linux-foundation.org,
penberg@kernel.org, mpm@selenic.com
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
yanmin_zhang@linux.intel.com, He, Bo, Chen, Lin Z
[-- Attachment #1: Type: text/plain, Size: 6183 bytes --]
when kernel uses kmalloc to allocate memory, slub/slab will find
a suitable kmem_cache. Usually the cache's object size is
greater than the requested size. There is unused space which contains
dirty data. These dirty data might have pointers pointing to a block
of leaked memory. The kernel wouldn't consider this memory as leaked when
scanning kmemleak objects.
The patch fixes it by clearing the unused memory.
Signed-off-by: Liu, XinwuX <xinwux.liu@intel.com>
Signed-off-by: Chen Lin Z <lin.z.chen@intel.com>
---
mm/slab.c | 22 +++++++++++++++++++++-
mm/slub.c | 35 +++++++++++++++++++++++++++++++++++
2 files changed, 56 insertions(+), 1 deletion(-)
diff --git a/mm/slab.c b/mm/slab.c
index 7eb38dd..ef25e7d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3423,6 +3423,12 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
ret = slab_alloc(cachep, flags, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = cachep->object_size - size;
+
+ if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
trace_kmalloc(_RET_IP_, ret,
size, cachep->size, flags);
return ret;
@@ -3476,11 +3482,19 @@ static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
struct kmem_cache *cachep;
+ void *ret;
cachep = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
- return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+ ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = cachep->object_size - size;
+
+ if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
+ return ret;
}
void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -3513,6 +3527,12 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
ret = slab_alloc(cachep, flags, caller);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = cachep->object_size - size;
+
+ if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
trace_kmalloc(caller, ret,
size, cachep->size, flags);
diff --git a/mm/slub.c b/mm/slub.c
index 54c0876..b53d9af 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2530,6 +2530,12 @@ EXPORT_SYMBOL(kmem_cache_alloc);
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = s->object_size - size;
+
+ if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
kasan_kmalloc(s, ret, size);
return ret;
@@ -2556,6 +2562,12 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
{
void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = s->object_size - size;
+
+ if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
trace_kmalloc_node(_RET_IP_, ret,
size, s->size, gfpflags, node);
@@ -3316,6 +3328,12 @@ void *__kmalloc(size_t size, gfp_t flags)
return s;
ret = slab_alloc(s, flags, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = s->object_size - size;
+
+ if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
@@ -3361,6 +3379,12 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
return s;
ret = slab_alloc_node(s, flags, node, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = s->object_size - size;
+
+ if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
@@ -3819,7 +3843,12 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
return s;
ret = slab_alloc(s, gfpflags, caller);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = s->object_size - size;
+ if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
/* Honor the call site pointer we received. */
trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -3849,6 +3878,12 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
return s;
ret = slab_alloc_node(s, gfpflags, node, caller);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = s->object_size - size;
+
+ if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
/* Honor the call site pointer we received. */
trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
--
1.9.1
[-- Attachment #2: Type: text/html, Size: 20161 bytes --]
^ permalink raw reply related [flat|nested] 9+ messages in thread* Re: [PATCH] slub/slab: fix kmemleak didn't work on some case
2015-06-08 5:14 [PATCH] slub/slab: fix kmemleak didn't work on some case Liu, XinwuX
@ 2015-06-08 9:38 ` Christoph Lameter
2015-06-08 10:13 ` Catalin Marinas
2015-06-08 10:03 ` Catalin Marinas
1 sibling, 1 reply; 9+ messages in thread
From: Christoph Lameter @ 2015-06-08 9:38 UTC (permalink / raw)
To: Liu, XinwuX
Cc: catalin.marinas@arm.com, penberg@kernel.org, mpm@selenic.com,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
yanmin_zhang@linux.intel.com, He, Bo, Chen, Lin Z
On Mon, 8 Jun 2015, Liu, XinwuX wrote:
> when kernel uses kmalloc to allocate memory, slub/slab will find
> a suitable kmem_cache. Ususally the cache's object size is often
> greater than requested size. There is unused space which contains
> dirty data. These dirty data might have pointers pointing to a block
dirty? In what sense?
> of leaked memory. Kernel wouldn't consider this memory as leaked when
> scanning kmemleak object.
This has never been considered leaked memory before to my knowledge and
the data is already initialized.
F.e. The zeroing function in linux/mm/slub.c::slab_alloc_node() zeros the
complete object and not only the number of bytes specified in the kmalloc
call. Same thing is true for SLAB.
I am a bit confused as to what issue this patch would address.
Also please send clean patches without special characters. Ensure proper
tabbing etc.
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH] slub/slab: fix kmemleak didn't work on some case
2015-06-08 9:38 ` Christoph Lameter
@ 2015-06-08 10:13 ` Catalin Marinas
2015-06-09 8:10 ` Zhang, Yanmin
0 siblings, 1 reply; 9+ messages in thread
From: Catalin Marinas @ 2015-06-08 10:13 UTC (permalink / raw)
To: Christoph Lameter
Cc: Liu, XinwuX, penberg@kernel.org, mpm@selenic.com,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
yanmin_zhang@linux.intel.com, He, Bo, Chen, Lin Z
On Mon, Jun 08, 2015 at 10:38:13AM +0100, Christoph Lameter wrote:
> On Mon, 8 Jun 2015, Liu, XinwuX wrote:
>
> > when kernel uses kmalloc to allocate memory, slub/slab will find
> > a suitable kmem_cache. Ususally the cache's object size is often
> > greater than requested size. There is unused space which contains
> > dirty data. These dirty data might have pointers pointing to a block
>
> dirty? In what sense?
I guess XinwuX meant uninitialised.
> > of leaked memory. Kernel wouldn't consider this memory as leaked when
> > scanning kmemleak object.
>
> This has never been considered leaked memory before to my knowledge and
> the data is already initialized.
It's not the object being allocated that is considered leaked. But
uninitialised data in this object is scanned by kmemleak and it may look
like valid pointers to real leaked objects. So such data increases the
number of kmemleak false negatives.
As I replied already, I don't think this is that bad, or at least not
worse than what kmemleak already does (looking at all data whether it's
pointer or not). It also doesn't solve the kmem_cache_alloc() case where
the original object size is no longer available.
> F.e. The zeroing function in linux/mm/slub.c::slab_alloc_node() zeros the
> complete object and not only the number of bytes specified in the kmalloc
> call. Same thing is true for SLAB.
But that's only when __GFP_ZERO is passed.
--
Catalin
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH] slub/slab: fix kmemleak didn't work on some case
2015-06-08 10:13 ` Catalin Marinas
@ 2015-06-09 8:10 ` Zhang, Yanmin
2015-06-09 15:03 ` Catalin Marinas
0 siblings, 1 reply; 9+ messages in thread
From: Zhang, Yanmin @ 2015-06-09 8:10 UTC (permalink / raw)
To: Catalin Marinas, Christoph Lameter
Cc: Liu, XinwuX, penberg@kernel.org, mpm@selenic.com,
linux-mm@kvack.org, linux-kernel@vger.kernel.org, He, Bo,
Chen, Lin Z
On 2015/6/8 18:13, Catalin Marinas wrote:
> On Mon, Jun 08, 2015 at 10:38:13AM +0100, Christoph Lameter wrote:
>> On Mon, 8 Jun 2015, Liu, XinwuX wrote:
>>
>>> when kernel uses kmalloc to allocate memory, slub/slab will find
>>> a suitable kmem_cache. Ususally the cache's object size is often
>>> greater than requested size. There is unused space which contains
>>> dirty data. These dirty data might have pointers pointing to a block
>> dirty? In what sense?
> I guess XinwuX meant uninitialised.
Uninitialized or dirty data used before being freed.
>
>>> of leaked memory. Kernel wouldn't consider this memory as leaked when
>>> scanning kmemleak object.
>> This has never been considered leaked memory before to my knowledge and
>> the data is already initialized.
> It's not the object being allocated that is considered leaked. But
> uninitialised data in this object is scanned by kmemleak and it may look
> like valid pointers to real leaked objects. So such data increases the
> number of kmemleak false negatives.
Yes, indeed.
>
> As I replied already, I don't think this is that bad, or at least not
> worse than what kmemleak already does (looking at all data whether it's
> pointer or not).
It depends. As for kmemleak, developers prefer false alarms over
missing some leaked memory.
> It also doesn't solve the kmem_cache_alloc() case where
> the original object size is no longer available.
Such issue around kmem_cache_alloc() case happens only when the
caller doesn't initialize or use the full object, so the object keeps
old dirty data.
This patch is to resolve the redundant unused space (more than object size)
although the full object is used by kernel.
>
>> F.e. The zeroing function in linux/mm/slub.c::slab_alloc_node() zeros the
>> complete object and not only the number of bytes specified in the kmalloc
>> call. Same thing is true for SLAB.
> But that's only when __GFP_ZERO is passed.
>
Thanks for the kind comments. There is a balance between performance (new memset
consumes time) and debug capability.
Yanmin
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH] slub/slab: fix kmemleak didn't work on some case
2015-06-09 8:10 ` Zhang, Yanmin
@ 2015-06-09 15:03 ` Catalin Marinas
2015-06-10 7:45 ` Zhang, Yanmin
0 siblings, 1 reply; 9+ messages in thread
From: Catalin Marinas @ 2015-06-09 15:03 UTC (permalink / raw)
To: Zhang, Yanmin
Cc: Christoph Lameter, Liu, XinwuX, penberg@kernel.org,
mpm@selenic.com, linux-mm@kvack.org, linux-kernel@vger.kernel.org,
He, Bo, Chen, Lin Z
On Tue, Jun 09, 2015 at 09:10:45AM +0100, Zhang, Yanmin wrote:
> On 2015/6/8 18:13, Catalin Marinas wrote:
> > As I replied already, I don't think this is that bad, or at least not
> > worse than what kmemleak already does (looking at all data whether it's
> > pointer or not).
>
> It depends. As for memleak, developers prefers there are false alarms instead
> of missing some leaked memory.
Lots of false positives aren't that nice, you spend a lot of time
debugging them (I've been there in the early kmemleak days). Anyway,
your use case is not about false positives vs. negatives but just false
negatives.
My point is that there is a lot of random, pointer-like data read by
kmemleak even without this memset (e.g. thread stacks, non-pointer data
in kmalloc'ed structures, data/bss sections). Just doing this memset may
reduce the chance of false negatives a bit but I don't think it would be
noticeable.
If there is some serious memory leak (lots of objects), they would
likely show up at some point. Even if it's a one-off leak, it's possible
that it shows up after some time (e.g. the object pointing to this
memory block is freed).
> > It also doesn't solve the kmem_cache_alloc() case where
> > the original object size is no longer available.
>
> Such issue around kmem_cache_alloc() case happens only when the
> caller doesn't initialize or use the full object, so the object keeps
> old dirty data.
The kmem_cache blocks size would be aligned to a cache line, so you
still have some extra bytes never touched by the caller.
> This patch is to resolve the redundant unused space (more than object size)
> although the full object is used by kernel.
So this solves only the cases where the original object size is still
known (e.g. kmalloc). It could also be solved by telling kmemleak the
actual object size.
--
Catalin
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH] slub/slab: fix kmemleak didn't work on some case
2015-06-09 15:03 ` Catalin Marinas
@ 2015-06-10 7:45 ` Zhang, Yanmin
2015-06-10 9:48 ` Catalin Marinas
0 siblings, 1 reply; 9+ messages in thread
From: Zhang, Yanmin @ 2015-06-10 7:45 UTC (permalink / raw)
To: Catalin Marinas
Cc: Christoph Lameter, Liu, XinwuX, penberg@kernel.org,
mpm@selenic.com, linux-mm@kvack.org, linux-kernel@vger.kernel.org,
He, Bo, Chen, Lin Z
On 2015/6/9 23:03, Catalin Marinas wrote:
> On Tue, Jun 09, 2015 at 09:10:45AM +0100, Zhang, Yanmin wrote:
>> On 2015/6/8 18:13, Catalin Marinas wrote:
>>> As I replied already, I don't think this is that bad, or at least not
>>> worse than what kmemleak already does (looking at all data whether it's
>>> pointer or not).
>> It depends. As for memleak, developers prefers there are false alarms instead
>> of missing some leaked memory.
> Lots of false positives aren't that nice, you spend a lot of time
> debugging them (I've been there in the early kmemleak days). Anyway,
> your use case is not about false positives vs. negatives but just false
> negatives.
>
> My point is that there is a lot of random, pointer-like data read by
> kmemleak even without this memset (e.g. thread stacks, non-pointer data
> in kmalloc'ed structures, data/bss sections). Just doing this memset may
> reduce the chance of false negatives a bit but I don't think it would be
> noticeable.
>
> If there is some serious memory leak (lots of objects), they would
> likely show up at some point. Even if it's a one-off leak, it's possible
> that it shows up after some time (e.g. the object pointing to this
> memory block is freed).
>
>>> It also doesn't solve the kmem_cache_alloc() case where
>>> the original object size is no longer available.
>> Such issue around kmem_cache_alloc() case happens only when the
>> caller doesn't initialize or use the full object, so the object keeps
>> old dirty data.
> The kmem_cache blocks size would be aligned to a cache line, so you
> still have some extra bytes never touched by the caller.
>
>> This patch is to resolve the redundant unused space (more than object size)
>> although the full object is used by kernel.
> So this solves only the cases where the original object size is still
> known (e.g. kmalloc). It could also be solved by telling kmemleak the
> actual object size.
Your explanation is reasonable. The patch is for debug purpose.
Maintainers can make decision based on balance.
Xinwu is a new developer in kernel community. Accepting the patch
into kernel can encourage him definitely. :)
Yanmin
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH] slub/slab: fix kmemleak didn't work on some case
2015-06-10 7:45 ` Zhang, Yanmin
@ 2015-06-10 9:48 ` Catalin Marinas
2015-06-11 8:18 ` Liu, XinwuX
0 siblings, 1 reply; 9+ messages in thread
From: Catalin Marinas @ 2015-06-10 9:48 UTC (permalink / raw)
To: Zhang, Yanmin
Cc: Christoph Lameter, Liu, XinwuX, penberg@kernel.org,
mpm@selenic.com, linux-mm@kvack.org, linux-kernel@vger.kernel.org,
He, Bo, Chen, Lin Z
On Wed, Jun 10, 2015 at 08:45:50AM +0100, Zhang, Yanmin wrote:
> On 2015/6/9 23:03, Catalin Marinas wrote:
> > On Tue, Jun 09, 2015 at 09:10:45AM +0100, Zhang, Yanmin wrote:
> >> On 2015/6/8 18:13, Catalin Marinas wrote:
> >>> As I replied already, I don't think this is that bad, or at least not
> >>> worse than what kmemleak already does (looking at all data whether it's
> >>> pointer or not).
> >> It depends. As for memleak, developers prefers there are false alarms instead
> >> of missing some leaked memory.
> > Lots of false positives aren't that nice, you spend a lot of time
> > debugging them (I've been there in the early kmemleak days). Anyway,
> > your use case is not about false positives vs. negatives but just false
> > negatives.
> >
> > My point is that there is a lot of random, pointer-like data read by
> > kmemleak even without this memset (e.g. thread stacks, non-pointer data
> > in kmalloc'ed structures, data/bss sections). Just doing this memset may
> > reduce the chance of false negatives a bit but I don't think it would be
> > noticeable.
> >
> > If there is some serious memory leak (lots of objects), they would
> > likely show up at some point. Even if it's a one-off leak, it's possible
> > that it shows up after some time (e.g. the object pointing to this
> > memory block is freed).
> >
> >>> It also doesn't solve the kmem_cache_alloc() case where
> >>> the original object size is no longer available.
> >> Such issue around kmem_cache_alloc() case happens only when the
> >> caller doesn't initialize or use the full object, so the object keeps
> >> old dirty data.
> > The kmem_cache blocks size would be aligned to a cache line, so you
> > still have some extra bytes never touched by the caller.
> >
> >> This patch is to resolve the redundant unused space (more than object size)
> >> although the full object is used by kernel.
> > So this solves only the cases where the original object size is still
> > known (e.g. kmalloc). It could also be solved by telling kmemleak the
> > actual object size.
>
> Your explanation is reasonable. The patch is for debug purpose.
> Maintainers can make decision based on balance.
The patch, as it stands, should not go in:
- too much code duplication (I already commented that a function
similar to kmemleak_erase would look much better)
- I don't think there is a noticeable benefit but happy to be proven
wrong
- there are other ways of achieving the same
> Xinwu is a new developer in kernel community. Accepting the patch
> into kernel can encourage him definitely. :)
As would constructive feedback ;)
That said, it would probably be more beneficial to be able to tell
kmemleak of the actual object size via another callback. This solves the
scanning of the extra data in a slab, restricts pointer values
referencing the object and better identification of the leaked objects
(by printing its real size). Two options:
a) Use the existing kmemleak_free_part() function to free the end of the
slab. This was originally meant for memblock freeing but can be
improved slightly to avoid creating a new object and deleting the old
one when only the last part of the block is freed.
b) Implement a new kmemleak_set_size(const void *ptr, size_t size). All
it needs to do is update the object->size value, no need for
re-inserting into the rb-tree.
Option (b) is probably better, especially with the latest patches I
posted where kmemleak_free*() always deletes the original object.
--
Catalin
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 9+ messages in thread
* RE: [PATCH] slub/slab: fix kmemleak didn't work on some case
2015-06-10 9:48 ` Catalin Marinas
@ 2015-06-11 8:18 ` Liu, XinwuX
0 siblings, 0 replies; 9+ messages in thread
From: Liu, XinwuX @ 2015-06-11 8:18 UTC (permalink / raw)
To: Catalin Marinas, Zhang, Yanmin
Cc: Christoph Lameter, penberg@kernel.org, mpm@selenic.com,
linux-mm@kvack.org, linux-kernel@vger.kernel.org, He, Bo,
Chen, Lin Z
when kernel uses kmalloc to allocate memory, slub/slab will find
a suitable kmem_cache. The cache's object size is often greater than
requested size. There is unused space which contains dirty data. These
dirty data might have pointers pointing to a block of leaked memory.
Kernel wouldn't consider this memory as leaked when scanning kmemleak object.
The patch fixes it by updating kmemleak object size with requested size,
so kmemleak won't scan the unused space.
Signed-off-by: Chen Lin Z <lin.z.chen@intel.com>
Signed-off-by: Liu, XinwuX <xinwux.liu@intel.com>
---
include/linux/kmemleak.h | 4 ++++
mm/kmemleak.c | 40 +++++++++++++++++++++++++++++++++++++++-
mm/slab.c | 11 ++++++++++-
mm/slub.c | 12 ++++++++++++
4 files changed, 65 insertions(+), 2 deletions(-)
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index e705467..cc35a2f 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -37,6 +37,7 @@ extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
+extern void kmemleak_set_size(const void *ptr, size_t size) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
int min_count, unsigned long flags,
@@ -104,6 +105,9 @@ static inline void kmemleak_erase(void **ptr)
static inline void kmemleak_no_scan(const void *ptr)
{
}
+static inline void kmemleak_set_size(const void *ptr, size_t size)
+{
+}
#endif /* CONFIG_DEBUG_KMEMLEAK */
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index f0fe4f2..487086e 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -241,7 +241,8 @@ enum {
KMEMLEAK_NOT_LEAK,
KMEMLEAK_IGNORE,
KMEMLEAK_SCAN_AREA,
- KMEMLEAK_NO_SCAN
+ KMEMLEAK_NO_SCAN,
+ KMEMLEAK_SET_SIZE
};
/*
@@ -799,6 +800,23 @@ static void object_no_scan(unsigned long ptr)
}
/*
+ * Set the size for an allocated object.
+ */
+static void __object_set_size(unsigned long ptr, size_t size)
+{
+ unsigned long flags;
+ struct kmemleak_object *object;
+
+ object = find_and_get_object(ptr, 0);
+ if (!object) {
+ kmemleak_warn("Try to set unknown object at 0x%08lx\n", ptr);
+ return;
+ }
+ object->size = size;
+ put_object(object);
+}
+
+/*
* Log an early kmemleak_* call to the early_log buffer. These calls will be
* processed later once kmemleak is fully initialized.
*/
@@ -1105,6 +1123,23 @@ void __ref kmemleak_no_scan(const void *ptr)
}
EXPORT_SYMBOL(kmemleak_no_scan);
+/**
+ * kmemleak_set_size - set an allocated object's size
+ * @ptr: pointer to beginning of the object
+ * @size: the new size of the allocated object
+ *
+ * The function needs to be called before the allocation function returns.
+ */
+void __ref kmemleak_set_size(const void *ptr, size_t size)
+{
+
+ if (kmemleak_enabled && ptr && !IS_ERR(ptr))
+ __object_set_size((unsigned long)ptr, size);
+ else if (kmemleak_early_log)
+ log_early(KMEMLEAK_SET_SIZE, ptr, size, 0);
+}
+EXPORT_SYMBOL(kmemleak_set_size);
+
/*
* Update an object's checksum and return true if it was modified.
*/
@@ -1880,6 +1915,9 @@ void __init kmemleak_init(void)
case KMEMLEAK_NO_SCAN:
kmemleak_no_scan(log->ptr);
break;
+ case KMEMLEAK_SET_SIZE:
+ kmemleak_set_size(log->ptr, log->size);
+ break;
default:
kmemleak_warn("Unknown early log operation: %d\n",
log->op_type);
diff --git a/mm/slab.c b/mm/slab.c
index 7eb38dd..90bc4fe 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3476,11 +3476,17 @@ static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
struct kmem_cache *cachep;
+ void *ret;
cachep = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
- return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+ ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
+
+ if (size < cachep->object_size)
+ kmemleak_set_size(ret, size);
+
+ return ret;
}
void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -3517,6 +3523,9 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
trace_kmalloc(caller, ret,
size, cachep->size, flags);
+ if (size < cachep->object_size)
+ kmemleak_set_size(ret, size);
+
return ret;
}
diff --git a/mm/slub.c b/mm/slub.c
index 54c0876..4ef17e5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3321,6 +3321,9 @@ void *__kmalloc(size_t size, gfp_t flags)
kasan_kmalloc(s, ret, size);
+ if (size < s->object_size)
+ kmemleak_set_size(ret, size);
+
return ret;
}
EXPORT_SYMBOL(__kmalloc);
@@ -3366,6 +3369,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
kasan_kmalloc(s, ret, size);
+ if (size < s->object_size)
+ kmemleak_set_size(ret, size);
+
return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
@@ -3823,6 +3829,9 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
/* Honor the call site pointer we received. */
trace_kmalloc(caller, ret, size, s->size, gfpflags);
+ if (size < s->object_size)
+ kmemleak_set_size(ret, size);
+
return ret;
}
@@ -3853,6 +3862,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
/* Honor the call site pointer we received. */
trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
+ if (size < s->object_size)
+ kmemleak_set_size(ret, size);
+
return ret;
}
#endif
--
1.9.1
-----Original Message-----
From: Catalin Marinas [mailto:catalin.marinas@arm.com]
Sent: Wednesday, June 10, 2015 5:49 PM
To: Zhang, Yanmin
Cc: Christoph Lameter; Liu, XinwuX; penberg@kernel.org; mpm@selenic.com; linux-mm@kvack.org; linux-kernel@vger.kernel.org; He, Bo; Chen, Lin Z
Subject: Re: [PATCH] slub/slab: fix kmemleak didn't work on some case
On Wed, Jun 10, 2015 at 08:45:50AM +0100, Zhang, Yanmin wrote:
> On 2015/6/9 23:03, Catalin Marinas wrote:
> > On Tue, Jun 09, 2015 at 09:10:45AM +0100, Zhang, Yanmin wrote:
> >> On 2015/6/8 18:13, Catalin Marinas wrote:
> >>> As I replied already, I don't think this is that bad, or at least
> >>> not worse than what kmemleak already does (looking at all data
> >>> whether it's pointer or not).
> >> It depends. As for memleak, developers prefers there are false
> >> alarms instead of missing some leaked memory.
> > Lots of false positives aren't that nice, you spend a lot of time
> > debugging them (I've been there in the early kmemleak days). Anyway,
> > your use case is not about false positives vs. negatives but just
> > false negatives.
> >
> > My point is that there is a lot of random, pointer-like data read by
> > kmemleak even without this memset (e.g. thread stacks, non-pointer
> > data in kmalloc'ed structures, data/bss sections). Just doing this
> > memset may reduce the chance of false negatives a bit but I don't
> > think it would be noticeable.
> >
> > If there is some serious memory leak (lots of objects), they would
> > likely show up at some point. Even if it's a one-off leak, it's
> > possible that it shows up after some time (e.g. the object pointing
> > to this memory block is freed).
> >
> >>> It also doesn't solve the kmem_cache_alloc() case where the
> >>> original object size is no longer available.
> >> Such issue around kmem_cache_alloc() case happens only when the
> >> caller doesn't initialize or use the full object, so the object
> >> keeps old dirty data.
> > The kmem_cache blocks size would be aligned to a cache line, so you
> > still have some extra bytes never touched by the caller.
> >
> >> This patch is to resolve the redundant unused space (more than
> >> object size) although the full object is used by kernel.
> > So this solves only the cases where the original object size is
> > still known (e.g. kmalloc). It could also be solved by telling
> > kmemleak the actual object size.
>
> Your explanation is reasonable. The patch is for debug purpose.
> Maintainers can make decision based on balance.
The patch, as it stands, should not go in:
- too much code duplication (I already commented that a function
similar to kmemleak_erase would look much better)
- I don't think there is a noticeable benefit but happy to be proven
wrong
- there are other ways of achieving the same
> Xinwu is a new developer in kernel community. Accepting the patch into
> kernel can encourage him definitely. :)
As would constructive feedback ;)
That said, it would probably be more beneficial to be able to tell kmemleak of the actual object size via another callback. This solves the scanning of the extra data in a slab, restricts pointer values referencing the object and better identification of the leaked objects (by printing its real size). Two options:
a) Use the existing kmemleak_free_part() function to free the end of the
slab. This was originally meant for memblock freeing but can be
improved slightly to avoid creating a new object and deleting the old
one when only the last part of the block is freed.
b) Implement a new kmemleak_set_size(const void *ptr, size_t size). All
it needs to do is update the object->size value, no need for
re-inserting into the rb-tree.
Option (b) is probably better, especially with the latest patches I posted where kmemleak_free*() always deletes the original object.
--
Catalin
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [PATCH] slub/slab: fix kmemleak didn't work on some case
2015-06-08 5:14 [PATCH] slub/slab: fix kmemleak didn't work on some case Liu, XinwuX
2015-06-08 9:38 ` Christoph Lameter
@ 2015-06-08 10:03 ` Catalin Marinas
1 sibling, 0 replies; 9+ messages in thread
From: Catalin Marinas @ 2015-06-08 10:03 UTC (permalink / raw)
To: Liu, XinwuX
Cc: cl@linux-foundation.org, penberg@kernel.org, mpm@selenic.com,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
yanmin_zhang@linux.intel.com, He, Bo, Chen, Lin Z
On Mon, Jun 08, 2015 at 06:14:32AM +0100, Liu, XinwuX wrote:
> when kernel uses kmalloc to allocate memory, slub/slab will find
> a suitable kmem_cache. Ususally the cache's object size is often
> greater than requested size. There is unused space which contains
> dirty data. These dirty data might have pointers pointing to a block
> of leaked memory. Kernel wouldn't consider this memory as leaked when
> scanning kmemleak object.
>
> The patch fixes it by clearing the unused memory.
In general, I'm not bothered about this. We may miss a leak or two but
in my experience they eventually show up at some point. Have you seen
any real leaks not being reported because of this? Note that we already
have a lot of non-pointer data that is scanned by kmemleak (it can't
distinguish which members are pointers in a data structure).
> mm/slab.c | 22 +++++++++++++++++++++-
> mm/slub.c | 35 +++++++++++++++++++++++++++++++++++
> 2 files changed, 56 insertions(+), 1 deletion(-)
>
> diff --git a/mm/slab.c b/mm/slab.c
> index 7eb38dd..ef25e7d 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -3423,6 +3423,12 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
> ret = slab_alloc(cachep, flags, _RET_IP_);
> +#ifdef CONFIG_DEBUG_KMEMLEAK
> + int delta = cachep->object_size - size;
> +
> + if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
> + memset((void *)((char *)ret + size), 0, delta);
> +#endif
On the implementation side, there is too much code duplication. I would
rather add something like the kmemleak_erase(), e.g.
kmemleak_erase_range(addr, object_size, actual_size) which is an empty
static inline when !CONFIG_DEBUG_KMEMLEAK.
Kmemleak already has an API for similar cases, kmemleak_scan_area().
While this allocates an extra structure, it could be adapted to only
change some of the object properties. However, the rb tree lookup is
probably still slower than a memset().
--
Catalin
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 9+ messages in thread
end of thread, other threads:[~2015-06-11 8:19 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2015-06-08 5:14 [PATCH] slub/slab: fix kmemleak didn't work on some case Liu, XinwuX
2015-06-08 9:38 ` Christoph Lameter
2015-06-08 10:13 ` Catalin Marinas
2015-06-09 8:10 ` Zhang, Yanmin
2015-06-09 15:03 ` Catalin Marinas
2015-06-10 7:45 ` Zhang, Yanmin
2015-06-10 9:48 ` Catalin Marinas
2015-06-11 8:18 ` Liu, XinwuX
2015-06-08 10:03 ` Catalin Marinas
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).