* [PATCH] debugobjects: fix lockdep warning #2
@ 2008-08-28 15:32 Vegard Nossum
2008-08-29 0:17 ` Andrew Morton
2008-08-29 10:42 ` [PATCH] debugobjects: fix lockdep warning #2 Thomas Gleixner
0 siblings, 2 replies; 6+ messages in thread
From: Vegard Nossum @ 2008-08-28 15:32 UTC (permalink / raw)
To: Ingo Molnar
Cc: Daniel J Blueman, Thomas Gleixner, Christoph Lameter,
Linus Torvalds, Rafael J. Wysocki, Linux Kernel Mailing List,
Adrian Bunk, Andrew Morton, Natalie Protasevich,
Kernel Testers List
On Thu, Aug 28, 2008 at 3:56 PM, Ingo Molnar <mingo@elte.hu> wrote:
> could you resend the final patch please? It's a candidate for .27, if it
> works out fine.
Here is the combined patch. I've tested it only briefly, and I am
unsure of whether it still produces lockdep warnings for Daniel or
not. I wish it would not be applied anywhere unless it was
officially Reviewed-by: someone. In particular, I'm not quite
steady with the irq-safe locking (Thomas might want to have a look).
Thanks :)
Vegard
>From 977cf583b79be7308d5e310711fe6038c8af96a4 Mon Sep 17 00:00:00 2001
From: Vegard Nossum <vegard.nossum@gmail.com>
Date: Thu, 28 Aug 2008 17:09:57 +0200
Subject: [PATCH] debugobjects: fix lockdep warning #2
Daniel J. Blueman reported:
> =======================================================
> [ INFO: possible circular locking dependency detected ]
> 2.6.27-rc4-224c #1
> -------------------------------------------------------
> hald/4680 is trying to acquire lock:
> (&n->list_lock){++..}, at: [<ffffffff802bfa26>] add_partial+0x26/0x80
>
> but task is already holding lock:
> (&obj_hash[i].lock){++..}, at: [<ffffffff8041cfdc>]
> debug_object_free+0x5c/0x120
We fix it by moving the actual freeing to outside the lock (the lock
now only protects the list).
The lock is also promoted to irq-safe (suggested by Dan).
Reported-by: Daniel J Blueman <daniel.blueman@gmail.com>
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
---
lib/debugobjects.c | 38 +++++++++++++++++++++++++++++---------
1 files changed, 29 insertions(+), 9 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 19acf8c..acf9ed8 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -115,9 +115,10 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
+ unsigned long flags;
struct debug_obj *obj = NULL;
- spin_lock(&pool_lock);
+ spin_lock_irqsave(&pool_lock, flags);
if (obj_pool.first) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -136,7 +137,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
if (obj_pool_free < obj_pool_min_free)
obj_pool_min_free = obj_pool_free;
}
- spin_unlock(&pool_lock);
+ spin_unlock_irqrestore(&pool_lock, flags);
return obj;
}
@@ -146,18 +147,19 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
*/
static void free_object(struct debug_obj *obj)
{
+ unsigned long flags;
unsigned long idx = (unsigned long)(obj - obj_static_pool);
if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
- spin_lock(&pool_lock);
+ spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
- spin_unlock(&pool_lock);
+ spin_unlock_irqrestore(&pool_lock, flags);
} else {
- spin_lock(&pool_lock);
+ spin_lock_irqsave(&pool_lock, flags);
obj_pool_used--;
- spin_unlock(&pool_lock);
+ spin_unlock_irqrestore(&pool_lock, flags);
kmem_cache_free(obj_cache, obj);
}
}
@@ -170,19 +172,28 @@ static void debug_objects_oom(void)
{
struct debug_bucket *db = obj_hash;
struct hlist_node *node, *tmp;
+ HLIST_HEAD(freelist);
struct debug_obj *obj;
unsigned long flags;
int i;
printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
+ /* XXX: Could probably be optimized by transplantation of more than
+ * one entry at a time. */
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
spin_lock_irqsave(&db->lock, flags);
hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
hlist_del(&obj->node);
- free_object(obj);
+ hlist_add_head(&obj->node, &freelist);
}
spin_unlock_irqrestore(&db->lock, flags);
+
+ /* Now free them */
+ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+ hlist_del(&obj->node);
+ free_object(obj);
+ }
}
}
@@ -500,8 +511,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
return;
default:
hlist_del(&obj->node);
+ spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
- break;
+ return;
}
out_unlock:
spin_unlock_irqrestore(&db->lock, flags);
@@ -512,6 +524,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
struct hlist_node *node, *tmp;
+ HLIST_HEAD(freelist);
struct debug_obj_descr *descr;
enum debug_obj_state state;
struct debug_bucket *db;
@@ -547,11 +560,18 @@ repeat:
goto repeat;
default:
hlist_del(&obj->node);
- free_object(obj);
+ hlist_add_head(&obj->node, &freelist);
break;
}
}
spin_unlock_irqrestore(&db->lock, flags);
+
+ /* Now free them */
+ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+ hlist_del(&obj->node);
+ free_object(obj);
+ }
+
if (cnt > debug_objects_maxchain)
debug_objects_maxchain = cnt;
}
--
1.5.5.1
^ permalink raw reply related [flat|nested] 6+ messages in thread* Re: [PATCH] debugobjects: fix lockdep warning #2
2008-08-28 15:32 [PATCH] debugobjects: fix lockdep warning #2 Vegard Nossum
@ 2008-08-29 0:17 ` Andrew Morton
2008-08-29 14:02 ` Thomas Gleixner
2008-08-29 21:57 ` Daniel J Blueman
2008-08-29 10:42 ` [PATCH] debugobjects: fix lockdep warning #2 Thomas Gleixner
1 sibling, 2 replies; 6+ messages in thread
From: Andrew Morton @ 2008-08-29 0:17 UTC (permalink / raw)
To: Vegard Nossum
Cc: mingo, daniel.blueman, tglx, clameter, torvalds, rjw,
linux-kernel, bunk, protasnb, kernel-testers
On Thu, 28 Aug 2008 17:32:14 +0200
Vegard Nossum <vegard.nossum@gmail.com> wrote:
> On Thu, Aug 28, 2008 at 3:56 PM, Ingo Molnar <mingo@elte.hu> wrote:
> > could you resend the final patch please? It's a candidate for .27, if it
> > works out fine.
>
> Here is the combined patch. I've tested it only briefly, and I am
> unsure of whether it still produces lockdep warnings for Daniel or
> not. I wish it would not be applied anywhere unless it was
> officially Reviewed-by: someone. In particular, I'm not quite
> steady with the irq-safe locking (Thomas might want to have a look).
>
It all looks good to me.
>
>
> >From 977cf583b79be7308d5e310711fe6038c8af96a4 Mon Sep 17 00:00:00 2001
> From: Vegard Nossum <vegard.nossum@gmail.com>
> Date: Thu, 28 Aug 2008 17:09:57 +0200
> Subject: [PATCH] debugobjects: fix lockdep warning #2
>
> Daniel J. Blueman reported:
> > =======================================================
> > [ INFO: possible circular locking dependency detected ]
> > 2.6.27-rc4-224c #1
> > -------------------------------------------------------
> > hald/4680 is trying to acquire lock:
> > (&n->list_lock){++..}, at: [<ffffffff802bfa26>] add_partial+0x26/0x80
> >
> > but task is already holding lock:
> > (&obj_hash[i].lock){++..}, at: [<ffffffff8041cfdc>]
> > debug_object_free+0x5c/0x120
>
> We fix it by moving the actual freeing to outside the lock (the lock
> now only protects the list).
>
> The lock is also promoted to irq-safe (suggested by Dan).
What was the reason for this other change? I'm sure Dan is a fine chap,
but we usually prefer a little more justification for changes ;)
> Reported-by: Daniel J Blueman <daniel.blueman@gmail.com>
> Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
> ---
> lib/debugobjects.c | 38 +++++++++++++++++++++++++++++---------
> 1 files changed, 29 insertions(+), 9 deletions(-)
>
> diff --git a/lib/debugobjects.c b/lib/debugobjects.c
> index 19acf8c..acf9ed8 100644
> --- a/lib/debugobjects.c
> +++ b/lib/debugobjects.c
> @@ -115,9 +115,10 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
> static struct debug_obj *
> alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
> {
> + unsigned long flags;
> struct debug_obj *obj = NULL;
>
> - spin_lock(&pool_lock);
> + spin_lock_irqsave(&pool_lock, flags);
> if (obj_pool.first) {
> obj = hlist_entry(obj_pool.first, typeof(*obj), node);
>
> @@ -136,7 +137,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
> if (obj_pool_free < obj_pool_min_free)
> obj_pool_min_free = obj_pool_free;
> }
> - spin_unlock(&pool_lock);
> + spin_unlock_irqrestore(&pool_lock, flags);
>
> return obj;
> }
> @@ -146,18 +147,19 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
> */
> static void free_object(struct debug_obj *obj)
> {
> + unsigned long flags;
> unsigned long idx = (unsigned long)(obj - obj_static_pool);
>
> if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
> - spin_lock(&pool_lock);
> + spin_lock_irqsave(&pool_lock, flags);
> hlist_add_head(&obj->node, &obj_pool);
> obj_pool_free++;
> obj_pool_used--;
> - spin_unlock(&pool_lock);
> + spin_unlock_irqrestore(&pool_lock, flags);
> } else {
> - spin_lock(&pool_lock);
> + spin_lock_irqsave(&pool_lock, flags);
> obj_pool_used--;
> - spin_unlock(&pool_lock);
> + spin_unlock_irqrestore(&pool_lock, flags);
> kmem_cache_free(obj_cache, obj);
> }
> }
> @@ -170,19 +172,28 @@ static void debug_objects_oom(void)
> {
> struct debug_bucket *db = obj_hash;
> struct hlist_node *node, *tmp;
> + HLIST_HEAD(freelist);
> struct debug_obj *obj;
> unsigned long flags;
> int i;
>
> printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
>
> + /* XXX: Could probably be optimized by transplantation of more than
> + * one entry at a time. */
> for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
> spin_lock_irqsave(&db->lock, flags);
> hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
> hlist_del(&obj->node);
> - free_object(obj);
> + hlist_add_head(&obj->node, &freelist);
> }
> spin_unlock_irqrestore(&db->lock, flags);
> +
> + /* Now free them */
> + hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
> + hlist_del(&obj->node);
> + free_object(obj);
I suspect that we can avoid the hlist_del() here, perhaps with a little
effort.
> +
> + /* Now free them */
> + hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
> + hlist_del(&obj->node);
> + free_object(obj);
> + }
> +
and the other one.
But I'm not sure that it's worth putting effort into - leaving dead
objects strung onto a partially-live list is a little bit smelly IMO.
^ permalink raw reply [flat|nested] 6+ messages in thread* Re: [PATCH] debugobjects: fix lockdep warning #2
2008-08-29 0:17 ` Andrew Morton
@ 2008-08-29 14:02 ` Thomas Gleixner
2008-08-29 21:57 ` Daniel J Blueman
1 sibling, 0 replies; 6+ messages in thread
From: Thomas Gleixner @ 2008-08-29 14:02 UTC (permalink / raw)
To: Andrew Morton
Cc: Vegard Nossum, mingo, daniel.blueman, clameter, torvalds, rjw,
linux-kernel, bunk, protasnb, kernel-testers
On Thu, 28 Aug 2008, Andrew Morton wrote:
> >
> > The lock is also promoted to irq-safe (suggested by Dan).
>
> What was the reason for this other change? I'm sure Dan is a fine chap,
> but we usually prefer a little more justification for changes ;)
I added the info already, when I applied it to the tip-tree.
> > + /* Now free them */
> > + hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
> > + hlist_del(&obj->node);
> > + free_object(obj);
>
> I suspect that we can avoid the hlist_del() here, perhaps with a little
> effort.
>
> > +
> > + /* Now free them */
> > + hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
> > + hlist_del(&obj->node);
> > + free_object(obj);
> > + }
> > +
>
> and the other one.
>
> But I'm not sure that it's worth putting effort into - leaving dead
> objects strung onto a partially-live list is a little bit smelly IMO.
I really feel better, when we delete them instead of throwing them
away with pointers to each other.
Thanks,
tglx
^ permalink raw reply [flat|nested] 6+ messages in thread* Re: [PATCH] debugobjects: fix lockdep warning #2
2008-08-29 0:17 ` Andrew Morton
2008-08-29 14:02 ` Thomas Gleixner
@ 2008-08-29 21:57 ` Daniel J Blueman
2008-09-01 10:59 ` [GIT pull] debugobject fixes for 2.6.27 Thomas Gleixner
1 sibling, 1 reply; 6+ messages in thread
From: Daniel J Blueman @ 2008-08-29 21:57 UTC (permalink / raw)
To: Andrew Morton, Vegard Nossum, Ingo Molnar
Cc: tglx, clameter, torvalds, rjw, linux-kernel, bunk, protasnb,
kernel-testers
Hi Andrew, Vegard, Ingo,
On Fri, Aug 29, 2008 at 1:17 AM, Andrew Morton
<akpm@linux-foundation.org> wrote:
> On Thu, 28 Aug 2008 17:32:14 +0200
> Vegard Nossum <vegard.nossum@gmail.com> wrote:
>
>> On Thu, Aug 28, 2008 at 3:56 PM, Ingo Molnar <mingo@elte.hu> wrote:
>> > could you resend the final patch please? It's a candidate for .27, if it
>> > works out fine.
>>
>> Here is the combined patch. I've tested it only briefly, and I am
>> unsure of whether it still produces lockdep warnings for Daniel or
>> not. I wish it would not be applied anywhere unless it was
>> officially Reviewed-by: someone. In particular, I'm not quite
>> steady with the irq-safe locking (Thomas might want to have a look).
>>
>
> It all looks good to me.
>
>>
>>
>> >From 977cf583b79be7308d5e310711fe6038c8af96a4 Mon Sep 17 00:00:00 2001
>> From: Vegard Nossum <vegard.nossum@gmail.com>
>> Date: Thu, 28 Aug 2008 17:09:57 +0200
>> Subject: [PATCH] debugobjects: fix lockdep warning #2
>>
>> Daniel J. Blueman reported:
>> > =======================================================
>> > [ INFO: possible circular locking dependency detected ]
>> > 2.6.27-rc4-224c #1
>> > -------------------------------------------------------
>> > hald/4680 is trying to acquire lock:
>> > (&n->list_lock){++..}, at: [<ffffffff802bfa26>] add_partial+0x26/0x80
>> >
>> > but task is already holding lock:
>> > (&obj_hash[i].lock){++..}, at: [<ffffffff8041cfdc>]
>> > debug_object_free+0x5c/0x120
>>
>> We fix it by moving the actual freeing to outside the lock (the lock
>> now only protects the list).
>>
>> The lock is also promoted to irq-safe (suggested by Dan).
>
> What was the reason for this other change? I'm sure Dan is a fine chap,
> but we usually prefer a little more justification for changes ;)
IRQ-safe xtime_lock is taken, then pool_lock is taken in
__debug_object_init, which is potentially unsafe. Upgrading
pool_lock's usage to IRQ-safe ensures there can be no potential for
deadlock.
>> Reported-by: Daniel J Blueman <daniel.blueman@gmail.com>
>> Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
>> ---
>> lib/debugobjects.c | 38 +++++++++++++++++++++++++++++---------
>> 1 files changed, 29 insertions(+), 9 deletions(-)
>>
>> diff --git a/lib/debugobjects.c b/lib/debugobjects.c
>> index 19acf8c..acf9ed8 100644
>> --- a/lib/debugobjects.c
>> +++ b/lib/debugobjects.c
>> @@ -115,9 +115,10 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
>> static struct debug_obj *
>> alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
>> {
>> + unsigned long flags;
>> struct debug_obj *obj = NULL;
>>
>> - spin_lock(&pool_lock);
>> + spin_lock_irqsave(&pool_lock, flags);
>> if (obj_pool.first) {
>> obj = hlist_entry(obj_pool.first, typeof(*obj), node);
>>
>> @@ -136,7 +137,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
>> if (obj_pool_free < obj_pool_min_free)
>> obj_pool_min_free = obj_pool_free;
>> }
>> - spin_unlock(&pool_lock);
>> + spin_unlock_irqrestore(&pool_lock, flags);
>>
>> return obj;
>> }
>> @@ -146,18 +147,19 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
>> */
>> static void free_object(struct debug_obj *obj)
>> {
>> + unsigned long flags;
>> unsigned long idx = (unsigned long)(obj - obj_static_pool);
>>
>> if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
>> - spin_lock(&pool_lock);
>> + spin_lock_irqsave(&pool_lock, flags);
>> hlist_add_head(&obj->node, &obj_pool);
>> obj_pool_free++;
>> obj_pool_used--;
>> - spin_unlock(&pool_lock);
>> + spin_unlock_irqrestore(&pool_lock, flags);
>> } else {
>> - spin_lock(&pool_lock);
>> + spin_lock_irqsave(&pool_lock, flags);
>> obj_pool_used--;
>> - spin_unlock(&pool_lock);
>> + spin_unlock_irqrestore(&pool_lock, flags);
>> kmem_cache_free(obj_cache, obj);
>> }
>> }
>> @@ -170,19 +172,28 @@ static void debug_objects_oom(void)
>> {
>> struct debug_bucket *db = obj_hash;
>> struct hlist_node *node, *tmp;
>> + HLIST_HEAD(freelist);
>> struct debug_obj *obj;
>> unsigned long flags;
>> int i;
>>
>> printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
>>
>> + /* XXX: Could probably be optimized by transplantation of more than
>> + * one entry at a time. */
>> for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
>> spin_lock_irqsave(&db->lock, flags);
>> hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
>> hlist_del(&obj->node);
>> - free_object(obj);
>> + hlist_add_head(&obj->node, &freelist);
>> }
>> spin_unlock_irqrestore(&db->lock, flags);
>> +
>> + /* Now free them */
>> + hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
>> + hlist_del(&obj->node);
>> + free_object(obj);
>
> I suspect that we can avoid the hlist_del() here, perhaps with a little
> effort.
>
>> +
>> + /* Now free them */
>> + hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
>> + hlist_del(&obj->node);
>> + free_object(obj);
>> + }
>> +
>
> and the other one.
>
> But I'm not sure that it's worth putting effort into - leaving dead
> objects strung onto a partially-live list is a little bit smelly IMO.
I've done some fairly heavy testing with the patch in its current
state (ie with the upgraded pool_lock, explained above), and it _is_
in fact solid; I wasn't looking at the right setup previously.
(with the other XFS tweaks too) I'm not able to cause any
deadlocks/stack traces/warnings with maximum debugging [* the KVM
errors are another story], which would be the first time so far, so
the patch looks good for mainline and 2.6.27 is looking very strong!
Daniel
--- [*]
emulation failed (pagetable) rip bf9032c5 0f 6f 17 0f
__ratelimit: 1804664 callbacks suppressed
Fail to handle apic access vmexit! Offset is 0xf0
--
Daniel J Blueman
^ permalink raw reply [flat|nested] 6+ messages in thread* [GIT pull] debugobject fixes for 2.6.27
2008-08-29 21:57 ` Daniel J Blueman
@ 2008-09-01 10:59 ` Thomas Gleixner
0 siblings, 0 replies; 6+ messages in thread
From: Thomas Gleixner @ 2008-09-01 10:59 UTC (permalink / raw)
To: Linus Torvalds
Cc: Andrew Morton, Vegard Nossum, Ingo Molnar, Rafael J. Wysocki,
LKML, bunk, protasnb, kernel-testers, Daniel J Blueman
Linus,
Please pull the latest core/debugobjects git tree from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git core/debugobjects
It contains a lockdep fix for the debug objects core code. Please note
that I added a helper function to move a hlist from one head to
another to avoid open coding it in the debugobjects code.
Thanks,
tglx
------------------>
Vegard Nossum (1):
debugobjects: fix lockdep warning
include/linux/list.h | 13 +++++++++++++
lib/debugobjects.c | 31 +++++++++++++++++++++++--------
2 files changed, 36 insertions(+), 8 deletions(-)
diff --git a/include/linux/list.h b/include/linux/list.h
index db35ef0..969f6e9 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -619,6 +619,19 @@ static inline void hlist_add_after(struct hlist_node *n,
next->next->pprev = &next->next;
}
+/*
+ * Move a list from one list head to another. Fixup the pprev
+ * reference of the first entry if it exists.
+ */
+static inline void hlist_move_list(struct hlist_head *old,
+ struct hlist_head *new)
+{
+ new->first = old->first;
+ if (new->first)
+ new->first->pprev = &new->first;
+ old->first = NULL;
+}
+
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 45a6bde..e3ab374 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -112,6 +112,7 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
/*
* Allocate a new object. If the pool is empty, switch off the debugger.
+ * Must be called with interrupts disabled.
*/
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
@@ -148,17 +149,18 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
static void free_object(struct debug_obj *obj)
{
unsigned long idx = (unsigned long)(obj - obj_static_pool);
+ unsigned long flags;
if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
- spin_lock(&pool_lock);
+ spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
- spin_unlock(&pool_lock);
+ spin_unlock_irqrestore(&pool_lock, flags);
} else {
- spin_lock(&pool_lock);
+ spin_lock_irqsave(&pool_lock, flags);
obj_pool_used--;
- spin_unlock(&pool_lock);
+ spin_unlock_irqrestore(&pool_lock, flags);
kmem_cache_free(obj_cache, obj);
}
}
@@ -171,6 +173,7 @@ static void debug_objects_oom(void)
{
struct debug_bucket *db = obj_hash;
struct hlist_node *node, *tmp;
+ HLIST_HEAD(freelist);
struct debug_obj *obj;
unsigned long flags;
int i;
@@ -179,11 +182,14 @@ static void debug_objects_oom(void)
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
spin_lock_irqsave(&db->lock, flags);
- hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+ hlist_move_list(&db->list, &freelist);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ /* Now free them */
+ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
hlist_del(&obj->node);
free_object(obj);
}
- spin_unlock_irqrestore(&db->lock, flags);
}
}
@@ -498,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
return;
default:
hlist_del(&obj->node);
+ spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
- break;
+ return;
}
out_unlock:
spin_unlock_irqrestore(&db->lock, flags);
@@ -510,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
struct hlist_node *node, *tmp;
+ HLIST_HEAD(freelist);
struct debug_obj_descr *descr;
enum debug_obj_state state;
struct debug_bucket *db;
@@ -545,11 +553,18 @@ repeat:
goto repeat;
default:
hlist_del(&obj->node);
- free_object(obj);
+ hlist_add_head(&obj->node, &freelist);
break;
}
}
spin_unlock_irqrestore(&db->lock, flags);
+
+ /* Now free them */
+ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+ hlist_del(&obj->node);
+ free_object(obj);
+ }
+
if (cnt > debug_objects_maxchain)
debug_objects_maxchain = cnt;
}
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH] debugobjects: fix lockdep warning #2
2008-08-28 15:32 [PATCH] debugobjects: fix lockdep warning #2 Vegard Nossum
2008-08-29 0:17 ` Andrew Morton
@ 2008-08-29 10:42 ` Thomas Gleixner
1 sibling, 0 replies; 6+ messages in thread
From: Thomas Gleixner @ 2008-08-29 10:42 UTC (permalink / raw)
To: Vegard Nossum
Cc: Ingo Molnar, Daniel J Blueman, Christoph Lameter, Linus Torvalds,
Rafael J. Wysocki, Linux Kernel Mailing List, Adrian Bunk,
Andrew Morton, Natalie Protasevich, Kernel Testers List
On Thu, 28 Aug 2008, Vegard Nossum wrote:
> On Thu, Aug 28, 2008 at 3:56 PM, Ingo Molnar <mingo@elte.hu> wrote:
> > could you resend the final patch please? It's a candidate for .27, if it
> > works out fine.
>
> Here is the combined patch. I've tested it only briefly, and I am
> unsure of whether it still produces lockdep warnings for Daniel or
> not. I wish it would not be applied anywhere unless it was
> officially Reviewed-by: someone. In particular, I'm not quite
> steady with the irq-safe locking (Thomas might want to have a look).
The irq safe locking is the easy part :) Calling free_object w/o the
hash bucket lock held is fine as we removed it already from the bucket
and there is no other reference than the pointer we hold at that point.
>
> printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
>
> + /* XXX: Could probably be optimized by transplantation of more than
> + * one entry at a time. */
> for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
> spin_lock_irqsave(&db->lock, flags);
> hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
> hlist_del(&obj->node);
> - free_object(obj);
> + hlist_add_head(&obj->node, &freelist);
> }
> spin_unlock_irqrestore(&db->lock, flags);
> +
> + /* Now free them */
> + hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
> + hlist_del(&obj->node);
> + free_object(obj);
> + }
> }
> }
What you want is a helper function which moves the complete list from
one head to the other. I'll whip one up.
I'll push it through the tip tree.
Thanks,
tglx
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2008-09-01 11:00 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-08-28 15:32 [PATCH] debugobjects: fix lockdep warning #2 Vegard Nossum
2008-08-29 0:17 ` Andrew Morton
2008-08-29 14:02 ` Thomas Gleixner
2008-08-29 21:57 ` Daniel J Blueman
2008-09-01 10:59 ` [GIT pull] debugobject fixes for 2.6.27 Thomas Gleixner
2008-08-29 10:42 ` [PATCH] debugobjects: fix lockdep warning #2 Thomas Gleixner
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox