* [PATCH 0/4] Kmemleak patches for 2.6.33
@ 2009-10-19 14:49 Catalin Marinas
2009-10-19 14:49 ` [PATCH 1/4] kmemleak: Simplify the kmemleak_scan_area() function prototype Catalin Marinas
` (3 more replies)
0 siblings, 4 replies; 8+ messages in thread
From: Catalin Marinas @ 2009-10-19 14:49 UTC (permalink / raw)
To: linux-kernel
This is a series of kmemleak patches for the upcoming merge window.
The final patch in the series changes the leak reporting logic a bit to
reduce the transient false positives.
If there are no objections, I'll push them to the -next tree. Thanks.
Catalin Marinas (4):
kmemleak: Simplify the kmemleak_scan_area() function prototype
kmemleak: Scan the _ftrace_events section in modules
kmemleak: Store object reverse references for debugging purposes
kmemleak: Allow two scanning passes before reporting an object as a leak
include/linux/kmemleak.h | 8 +--
kernel/module.c | 13 +++-
mm/kmemleak.c | 141 ++++++++++++++++++----------------------------
mm/slab.c | 4 +
4 files changed, 68 insertions(+), 98 deletions(-)
--
Catalin
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH 1/4] kmemleak: Simplify the kmemleak_scan_area() function prototype
2009-10-19 14:49 [PATCH 0/4] Kmemleak patches for 2.6.33 Catalin Marinas
@ 2009-10-19 14:49 ` Catalin Marinas
2009-10-19 14:49 ` [PATCH 2/4] kmemleak: Scan the _ftrace_events section in modules Catalin Marinas
` (2 subsequent siblings)
3 siblings, 0 replies; 8+ messages in thread
From: Catalin Marinas @ 2009-10-19 14:49 UTC (permalink / raw)
To: linux-kernel
This function was taking unnecessary arguments that can be determined
by kmemleak. The patch also modifies the calling sites.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
include/linux/kmemleak.h | 8 ++++----
kernel/module.c | 7 ++-----
mm/kmemleak.c | 49 ++++++++++++++++++++--------------------------
mm/slab.c | 4 ++--
4 files changed, 29 insertions(+), 39 deletions(-)
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 3c7497d..00888b9 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -32,8 +32,8 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset,
size_t size) __ref;
extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
-extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
- size_t length, gfp_t gfp) __ref;
+extern void kmemleak_scan_area(const void *ptr, size_t length,
+ gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -84,8 +84,8 @@ static inline void kmemleak_not_leak(const void *ptr)
static inline void kmemleak_ignore(const void *ptr)
{
}
-static inline void kmemleak_scan_area(const void *ptr, unsigned long offset,
- size_t length, gfp_t gfp)
+static inline void kmemleak_scan_area(const void *ptr, size_t length,
+ gfp_t gfp)
{
}
static inline void kmemleak_erase(void **ptr)
diff --git a/kernel/module.c b/kernel/module.c
index 8b7d880..1eb9520 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2043,9 +2043,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
unsigned int i;
/* only scan the sections containing data */
- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
- (unsigned long)mod->module_core,
- sizeof(struct module), GFP_KERNEL);
+ kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
for (i = 1; i < hdr->e_shnum; i++) {
if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -2054,8 +2052,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
&& strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
continue;
- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
- (unsigned long)mod->module_core,
+ kmemleak_scan_area((void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size, GFP_KERNEL);
}
}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 8bf765c..9610635 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -119,8 +119,8 @@
/* scanning area inside a memory block */
struct kmemleak_scan_area {
struct hlist_node node;
- unsigned long offset;
- size_t length;
+ unsigned long start;
+ size_t size;
};
#define KMEMLEAK_GREY 0
@@ -241,8 +241,6 @@ struct early_log {
const void *ptr; /* allocated/freed memory block */
size_t size; /* memory block size */
int min_count; /* minimum reference count */
- unsigned long offset; /* scan area offset */
- size_t length; /* scan area length */
unsigned long trace[MAX_TRACE]; /* stack trace */
unsigned int trace_len; /* stack trace length */
};
@@ -720,14 +718,13 @@ static void make_black_object(unsigned long ptr)
* Add a scanning area to the object. If at least one such area is added,
* kmemleak will only scan these ranges rather than the whole memory block.
*/
-static void add_scan_area(unsigned long ptr, unsigned long offset,
- size_t length, gfp_t gfp)
+static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
unsigned long flags;
struct kmemleak_object *object;
struct kmemleak_scan_area *area;
- object = find_and_get_object(ptr, 0);
+ object = find_and_get_object(ptr, 1);
if (!object) {
kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
ptr);
@@ -741,7 +738,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
}
spin_lock_irqsave(&object->lock, flags);
- if (offset + length > object->size) {
+ if (ptr + size > object->pointer + object->size) {
kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
dump_object_info(object);
kmem_cache_free(scan_area_cache, area);
@@ -749,8 +746,8 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
}
INIT_HLIST_NODE(&area->node);
- area->offset = offset;
- area->length = length;
+ area->start = ptr;
+ area->size = size;
hlist_add_head(&area->node, &object->area_list);
out_unlock:
@@ -786,7 +783,7 @@ static void object_no_scan(unsigned long ptr)
* processed later once kmemleak is fully initialized.
*/
static void __init log_early(int op_type, const void *ptr, size_t size,
- int min_count, unsigned long offset, size_t length)
+ int min_count)
{
unsigned long flags;
struct early_log *log;
@@ -808,8 +805,6 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
log->ptr = ptr;
log->size = size;
log->min_count = min_count;
- log->offset = offset;
- log->length = length;
if (op_type == KMEMLEAK_ALLOC)
log->trace_len = __save_stack_trace(log->trace);
crt_early_log++;
@@ -858,7 +853,7 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
create_object((unsigned long)ptr, size, min_count, gfp);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
+ log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
@@ -873,7 +868,7 @@ void __ref kmemleak_free(const void *ptr)
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
delete_object_full((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
+ log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
@@ -888,7 +883,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
delete_object_part((unsigned long)ptr, size);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+ log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
@@ -903,7 +898,7 @@ void __ref kmemleak_not_leak(const void *ptr)
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
make_gray_object((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
+ log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
@@ -919,22 +914,21 @@ void __ref kmemleak_ignore(const void *ptr)
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
make_black_object((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
+ log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
/*
* Limit the range to be scanned in an allocated memory block.
*/
-void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
- size_t length, gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
- add_scan_area((unsigned long)ptr, offset, length, gfp);
+ add_scan_area((unsigned long)ptr, size, gfp);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
+ log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
@@ -948,7 +942,7 @@ void __ref kmemleak_no_scan(const void *ptr)
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
object_no_scan((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
+ log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
@@ -1075,9 +1069,9 @@ static void scan_object(struct kmemleak_object *object)
}
} else
hlist_for_each_entry(area, elem, &object->area_list, node)
- scan_block((void *)(object->pointer + area->offset),
- (void *)(object->pointer + area->offset
- + area->length), object, 0);
+ scan_block((void *)area->start,
+ (void *)(area->start + area->size),
+ object, 0);
out:
spin_unlock_irqrestore(&object->lock, flags);
}
@@ -1642,8 +1636,7 @@ void __init kmemleak_init(void)
kmemleak_ignore(log->ptr);
break;
case KMEMLEAK_SCAN_AREA:
- kmemleak_scan_area(log->ptr, log->offset, log->length,
- GFP_KERNEL);
+ kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
break;
case KMEMLEAK_NO_SCAN:
kmemleak_no_scan(log->ptr);
diff --git a/mm/slab.c b/mm/slab.c
index 646db30..d2713a9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2584,8 +2584,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
* kmemleak does not treat the ->s_mem pointer as a reference
* to the object. Otherwise we will not report the leak.
*/
- kmemleak_scan_area(slabp, offsetof(struct slab, list),
- sizeof(struct list_head), local_flags);
+ kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
+ local_flags);
if (!slabp)
return NULL;
} else {
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 2/4] kmemleak: Scan the _ftrace_events section in modules
2009-10-19 14:49 [PATCH 0/4] Kmemleak patches for 2.6.33 Catalin Marinas
2009-10-19 14:49 ` [PATCH 1/4] kmemleak: Simplify the kmemleak_scan_area() function prototype Catalin Marinas
@ 2009-10-19 14:49 ` Catalin Marinas
2009-10-20 0:54 ` Rusty Russell
2009-10-19 14:49 ` [PATCH 3/4] kmemleak: Store object reverse references for debugging purposes Catalin Marinas
2009-10-19 14:49 ` [PATCH 4/4] kmemleak: Allow two scanning passes before reporting an object as a leak Catalin Marinas
3 siblings, 1 reply; 8+ messages in thread
From: Catalin Marinas @ 2009-10-19 14:49 UTC (permalink / raw)
To: linux-kernel; +Cc: Rusty Russell, Zdenek Kabelac
This section contains pointers to allocated objects and not scanning it
leads to false positives.
Reported-by: Zdenek Kabelac <zdenek.kabelac@gmail.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
kernel/module.c | 6 ++++++
1 files changed, 6 insertions(+), 0 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index 1eb9520..dd29ba4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2380,6 +2380,12 @@ static noinline struct module *load_module(void __user *umod,
"_ftrace_events",
sizeof(*mod->trace_events),
&mod->num_trace_events);
+ /*
+ * This section contains pointers to allocated objects in the trace
+ * code and not scanning it leads to false positives.
+ */
+ kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
+ mod->num_trace_events, GFP_KERNEL);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/* sechdrs[0].sh_size is always zero */
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 3/4] kmemleak: Store object reverse references for debugging purposes
2009-10-19 14:49 [PATCH 0/4] Kmemleak patches for 2.6.33 Catalin Marinas
2009-10-19 14:49 ` [PATCH 1/4] kmemleak: Simplify the kmemleak_scan_area() function prototype Catalin Marinas
2009-10-19 14:49 ` [PATCH 2/4] kmemleak: Scan the _ftrace_events section in modules Catalin Marinas
@ 2009-10-19 14:49 ` Catalin Marinas
2009-10-19 14:49 ` [PATCH 4/4] kmemleak: Allow two scanning passes before reporting an object as a leak Catalin Marinas
3 siblings, 0 replies; 8+ messages in thread
From: Catalin Marinas @ 2009-10-19 14:49 UTC (permalink / raw)
To: linux-kernel
There are some memory leak reports which come and go but are hard to
identify whether they are false positives or not. This patch stores up
to 4 reverse references in an object so that once a leak disappeared,
using "echo dump=<ptr> > debug/kmemleak" would list where an object is
referenced from.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
mm/kmemleak.c | 15 ++++++++++++++-
1 files changed, 14 insertions(+), 1 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 9610635..998162f 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -110,6 +110,7 @@
#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
#define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */
#define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
+#define MAX_REV_REF 4 /* number of reverse references */
#define BYTES_PER_POINTER sizeof(void *)
@@ -156,6 +157,7 @@ struct kmemleak_object {
unsigned long jiffies; /* creation timestamp */
pid_t pid; /* pid of the current task */
char comm[TASK_COMM_LEN]; /* executable name */
+ void *rev_ref[MAX_REV_REF]; /* reverse references */
};
/* flag representing the memory block allocation status */
@@ -379,6 +381,13 @@ static void dump_object_info(struct kmemleak_object *object)
pr_notice(" min_count = %d\n", object->min_count);
pr_notice(" count = %d\n", object->count);
pr_notice(" flags = 0x%lx\n", object->flags);
+ if (object->count) {
+ int i;
+ pr_notice(" referred from:");
+ for (i = 0; i < object->count; i++)
+ printk(" 0x%p", object->rev_ref[i]);
+ printk("\n");
+ }
pr_notice(" backtrace:\n");
print_stack_trace(&trace, 4);
}
@@ -1011,8 +1020,12 @@ static void scan_block(void *_start, void *_end,
*/
spin_lock_irqsave_nested(&object->lock, flags,
SINGLE_DEPTH_NESTING);
+ if (object->count < MAX_REV_REF)
+ object->rev_ref[object->count] = ptr;
+
if (!color_white(object)) {
/* non-orphan, ignored or new */
+ object->count++;
spin_unlock_irqrestore(&object->lock, flags);
put_object(object);
continue;
@@ -1416,7 +1429,7 @@ static int dump_str_object_info(const char *str)
unsigned long addr;
addr= simple_strtoul(str, NULL, 0);
- object = find_and_get_object(addr, 0);
+ object = find_and_get_object(addr, 1);
if (!object) {
pr_info("Unknown object at 0x%08lx\n", addr);
return -EINVAL;
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 4/4] kmemleak: Allow two scanning passes before reporting an object as a leak
2009-10-19 14:49 [PATCH 0/4] Kmemleak patches for 2.6.33 Catalin Marinas
` (2 preceding siblings ...)
2009-10-19 14:49 ` [PATCH 3/4] kmemleak: Store object reverse references for debugging purposes Catalin Marinas
@ 2009-10-19 14:49 ` Catalin Marinas
3 siblings, 0 replies; 8+ messages in thread
From: Catalin Marinas @ 2009-10-19 14:49 UTC (permalink / raw)
To: linux-kernel
The majority of the transient false positives in kmemleak are caused by
pointers being moved to a data structure after kmemleak scanned it
during a scanning episode. This may cause a false positive report on the
object pointed to by such a pointer. This patch simplifies the scanning
algorithm for objects allocated during a scanning episode and instead
requires an object to be found as a leak twice consecutively before
being reported.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
mm/kmemleak.c | 77 ++++++++++++++-------------------------------------------
1 files changed, 19 insertions(+), 58 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 998162f..efdd4b5 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -108,7 +108,6 @@
#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
#define SECS_FIRST_SCAN 60 /* delay before the first scan */
#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
-#define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */
#define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
#define MAX_REV_REF 4 /* number of reverse references */
@@ -166,8 +165,8 @@ struct kmemleak_object {
#define OBJECT_REPORTED (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN (1 << 2)
-/* flag set on newly allocated objects */
-#define OBJECT_NEW (1 << 3)
+/* flag set on unreferenced objects during first a scanning pass */
+#define OBJECT_FIRST_PASS (1 << 3)
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE 16
@@ -205,9 +204,6 @@ static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;
static struct task_struct *scan_thread;
-/* used to avoid reporting of recently allocated objects */
-static unsigned long jiffies_min_age;
-static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
@@ -323,21 +319,13 @@ static bool color_gray(const struct kmemleak_object *object)
object->count >= object->min_count;
}
-static bool color_black(const struct kmemleak_object *object)
-{
- return object->min_count == KMEMLEAK_BLACK;
-}
-
/*
- * Objects are considered unreferenced only if their color is white, they have
- * not be deleted and have a minimum age to avoid false positives caused by
- * pointers temporarily stored in CPU registers.
+ * Objects are considered unreferenced only if their color is white and they
+ * have not been deleted.
*/
static bool unreferenced_object(struct kmemleak_object *object)
{
- return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
- time_before_eq(object->jiffies + jiffies_min_age,
- jiffies_last_scan);
+ return (object->flags & OBJECT_ALLOCATED) && color_white(object);
}
/*
@@ -529,11 +517,11 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
INIT_HLIST_HEAD(&object->area_list);
spin_lock_init(&object->lock);
atomic_set(&object->use_count, 1);
- object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
+ object->flags = OBJECT_ALLOCATED;
object->pointer = ptr;
object->size = size;
object->min_count = min_count;
- object->count = -1; /* no color initially */
+ object->count = KMEMLEAK_BLACK; /* no white color initially */
object->jiffies = jiffies;
/* task information */
@@ -1100,9 +1088,6 @@ static void kmemleak_scan(void)
struct kmemleak_object *object, *tmp;
int i;
int new_leaks = 0;
- int gray_list_pass = 0;
-
- jiffies_last_scan = jiffies;
/* prepare the kmemleak_object's */
rcu_read_lock();
@@ -1121,7 +1106,6 @@ static void kmemleak_scan(void)
#endif
/* reset the reference count (whiten the object) */
object->count = 0;
- object->flags &= ~OBJECT_NEW;
if (color_gray(object) && get_object(object))
list_add_tail(&object->gray_list, &gray_list);
@@ -1185,7 +1169,6 @@ static void kmemleak_scan(void)
* kmemleak objects cannot be freed from outside the loop because their
* use_count was increased.
*/
-repeat:
object = list_entry(gray_list.next, typeof(*object), gray_list);
while (&object->gray_list != &gray_list) {
cond_resched();
@@ -1203,38 +1186,12 @@ repeat:
object = tmp;
}
-
- if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
- goto scan_end;
-
- /*
- * Check for new objects allocated during this scanning and add them
- * to the gray list.
- */
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
- if ((object->flags & OBJECT_NEW) && !color_black(object) &&
- get_object(object)) {
- object->flags &= ~OBJECT_NEW;
- list_add_tail(&object->gray_list, &gray_list);
- }
- spin_unlock_irqrestore(&object->lock, flags);
- }
- rcu_read_unlock();
-
- if (!list_empty(&gray_list))
- goto repeat;
-
-scan_end:
WARN_ON(!list_empty(&gray_list));
/*
- * If scanning was stopped or new objects were being allocated at a
- * higher rate than gray list scanning, do not report any new
- * unreferenced objects.
+ * If scanning was stopped do not report any new unreferenced objects.
*/
- if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
+ if (scan_should_stop())
return;
/*
@@ -1243,11 +1200,16 @@ scan_end:
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
spin_lock_irqsave(&object->lock, flags);
- if (unreferenced_object(object) &&
- !(object->flags & OBJECT_REPORTED)) {
- object->flags |= OBJECT_REPORTED;
- new_leaks++;
- }
+ if (unreferenced_object(object)) {
+ if (!(object->flags & OBJECT_FIRST_PASS))
+ object->flags |= OBJECT_FIRST_PASS;
+ else if (!(object->flags & OBJECT_REPORTED)) {
+ /* previously found as leak, report it */
+ new_leaks++;
+ object->flags |= OBJECT_REPORTED;
+ }
+ } else
+ object->flags &= ~OBJECT_FIRST_PASS;
spin_unlock_irqrestore(&object->lock, flags);
}
rcu_read_unlock();
@@ -1609,7 +1571,6 @@ void __init kmemleak_init(void)
int i;
unsigned long flags;
- jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH 2/4] kmemleak: Scan the _ftrace_events section in modules
2009-10-19 14:49 ` [PATCH 2/4] kmemleak: Scan the _ftrace_events section in modules Catalin Marinas
@ 2009-10-20 0:54 ` Rusty Russell
2009-10-20 8:39 ` Catalin Marinas
0 siblings, 1 reply; 8+ messages in thread
From: Rusty Russell @ 2009-10-20 0:54 UTC (permalink / raw)
To: Catalin Marinas; +Cc: linux-kernel, Zdenek Kabelac
On Tue, 20 Oct 2009 01:19:29 am Catalin Marinas wrote:
> This section contains pointers to allocated objects and not scanning it
> leads to false positives.
Thanks, applied. Want me to push this for 2.6.32?
Rusty.
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 2/4] kmemleak: Scan the _ftrace_events section in modules
2009-10-20 0:54 ` Rusty Russell
@ 2009-10-20 8:39 ` Catalin Marinas
2009-10-21 0:17 ` Rusty Russell
0 siblings, 1 reply; 8+ messages in thread
From: Catalin Marinas @ 2009-10-20 8:39 UTC (permalink / raw)
To: Rusty Russell; +Cc: linux-kernel, Zdenek Kabelac
Rusty Russell <rusty@rustcorp.com.au> wrote:
> On Tue, 20 Oct 2009 01:19:29 am Catalin Marinas wrote:
>> This section contains pointers to allocated objects and not scanning it
>> leads to false positives.
>
> Thanks, applied. Want me to push this for 2.6.32?
This patch requires 1/4 to be applied as it changes the kmemleak API a
bit, so it won't even compile on its own. I can push them both
together with your ack for the second.
I'm not sure whether API changes make sense during -rc releases, I was
more thinking of pushing the patches for 2.6.33.
For the current kernel, I could send you a patch with the current API,
though calling kmemleak_scan_area() for _ftrace_events is a bit ugly.
Thanks.
--
Catalin
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 2/4] kmemleak: Scan the _ftrace_events section in modules
2009-10-20 8:39 ` Catalin Marinas
@ 2009-10-21 0:17 ` Rusty Russell
0 siblings, 0 replies; 8+ messages in thread
From: Rusty Russell @ 2009-10-21 0:17 UTC (permalink / raw)
To: Catalin Marinas; +Cc: linux-kernel, Zdenek Kabelac
On Tue, 20 Oct 2009 07:09:20 pm Catalin Marinas wrote:
> Rusty Russell <rusty@rustcorp.com.au> wrote:
> > On Tue, 20 Oct 2009 01:19:29 am Catalin Marinas wrote:
> >> This section contains pointers to allocated objects and not scanning it
> >> leads to false positives.
> >
> > Thanks, applied. Want me to push this for 2.6.32?
>
> This patch requires 1/4 to be applied as it changes the kmemleak API a
> bit, so it won't even compile on its own. I can push them both
> together with your ack for the second.
>
> I'm not sure whether API changes make sense during -rc releases, I was
> more thinking of pushing the patches for 2.6.33.
Definitely 2.6.33 material. And if it's tied to your other patches, best
for you to hold them all.
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Thanks,
Rusty.
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2009-10-21 0:17 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2009-10-19 14:49 [PATCH 0/4] Kmemleak patches for 2.6.33 Catalin Marinas
2009-10-19 14:49 ` [PATCH 1/4] kmemleak: Simplify the kmemleak_scan_area() function prototype Catalin Marinas
2009-10-19 14:49 ` [PATCH 2/4] kmemleak: Scan the _ftrace_events section in modules Catalin Marinas
2009-10-20 0:54 ` Rusty Russell
2009-10-20 8:39 ` Catalin Marinas
2009-10-21 0:17 ` Rusty Russell
2009-10-19 14:49 ` [PATCH 3/4] kmemleak: Store object reverse references for debugging purposes Catalin Marinas
2009-10-19 14:49 ` [PATCH 4/4] kmemleak: Allow two scanning passes before reporting an object as a leak Catalin Marinas
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox