* [PATCH] lockdep: returns after DEBUG_LOCKS_WARN_ONs etc.
@ 2006-12-18 11:56 Jarek Poplawski
2006-12-18 14:39 ` [patch] lockdep: more unlock-on-error fixes Ingo Molnar
0 siblings, 1 reply; 5+ messages in thread
From: Jarek Poplawski @ 2006-12-18 11:56 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton
Hello,
If any of these proposals should be omitted or separated
let me know.
Regards,
Jarek P.
---
[PATCH] lockdep: returns after DEBUG_LOCKS_WARN_ONs etc.
lockdep.c changes:
- returns after DEBUG_LOCKS_WARN_ON added in 3 places
- debug_locks checking after lookup_chain_cache()
added in __lock_acquire()
- locking for testing and changing global variable
max_lockdep_depth added in __lock_acquire()
- local_irq_save/restore() replaced by raw versions
in debug_check_no_locks_freed()
Signed-off-by: Jarek Poplawski <jarkao2@o2.pl>
---
diff -Nurp linux-2.6.20-rc1-/kernel/lockdep.c linux-2.6.20-rc1/kernel/lockdep.c
--- linux-2.6.20-rc1-/kernel/lockdep.c 2006-12-18 08:59:24.000000000 +0100
+++ linux-2.6.20-rc1/kernel/lockdep.c 2006-12-18 11:57:40.000000000 +0100
@@ -1293,7 +1293,8 @@ out_unlock_set:
if (!subclass || force)
lock->class_cache = class;
- DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
+ if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
+ return NULL;
return class;
}
@@ -1308,7 +1309,8 @@ static inline int lookup_chain_cache(u64
struct list_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
- DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ return 0;
/*
* We can walk it lock-free, because entries only get added
* to the hash:
@@ -1390,7 +1392,9 @@ static void check_chain_key(struct task_
return;
}
id = hlock->class - lock_classes;
- DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS);
+ if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
+ return;
+
if (prev_hlock && (prev_hlock->irq_context !=
hlock->irq_context))
chain_key = 0;
@@ -2201,7 +2205,11 @@ out_calc_hash:
if (!check_prevs_add(curr, hlock))
return 0;
graph_unlock();
- }
+
+ } else if (unlikely(!debug_locks))
+ /* after lookup_chain_cache() */
+ return 0;
+
curr->lockdep_depth++;
check_chain_key(curr);
if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
@@ -2210,9 +2218,14 @@ out_calc_hash:
printk("turning off the locking correctness validator.\n");
return 0;
}
+
+ if (!graph_lock())
+ return 0;
+
if (unlikely(curr->lockdep_depth > max_lockdep_depth))
max_lockdep_depth = curr->lockdep_depth;
+ graph_unlock();
return 1;
}
@@ -2665,7 +2678,7 @@ void debug_check_no_locks_freed(const vo
if (unlikely(!debug_locks))
return;
- local_irq_save(flags);
+ raw_local_irq_save(flags);
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
@@ -2679,7 +2692,7 @@ void debug_check_no_locks_freed(const vo
print_freed_lock_bug(curr, mem_from, mem_to, hlock);
break;
}
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
^ permalink raw reply [flat|nested] 5+ messages in thread

* [patch] lockdep: more unlock-on-error fixes
2006-12-18 11:56 [PATCH] lockdep: returns after DEBUG_LOCKS_WARN_ONs etc Jarek Poplawski
@ 2006-12-18 14:39 ` Ingo Molnar
2006-12-19 7:21 ` Jarek Poplawski
2006-12-19 9:50 ` [patch] lockdep: more unlock-on-error fixes, fix Ingo Molnar
0 siblings, 2 replies; 5+ messages in thread
From: Ingo Molnar @ 2006-12-18 14:39 UTC (permalink / raw)
To: Jarek Poplawski; +Cc: linux-kernel, Andrew Morton, Linus Torvalds
* Jarek Poplawski <jarkao2@o2.pl> wrote:
> Hello,
>
> If any of these proposals should be omitted or separated let me know.
thanks for the fixes, they look good to me. I have reorganized the
__lock_acquire() changes a bit. Plus i dropped the check_locks_freed()
changes: there's no reason lockdep should be using 'raw' irq flags
saving - these functions are not part of the irq-flags tracing code so
they don't /need/ to be raw.
An updated patch is below. I also have boot tested it. Andrew, Linus,
please apply.
Ingo
------------------------->
Subject: [patch] lockdep: more unlock-on-error fixes
From: Jarek Poplawski <jarkao2@o2.pl>
- returns after DEBUG_LOCKS_WARN_ON added in 3 places
- debug_locks checking after lookup_chain_cache()
added in __lock_acquire()
- locking for testing and changing global variable
max_lockdep_depth added in __lock_acquire()
Signed-off-by: Jarek Poplawski <jarkao2@o2.pl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/lockdep.c | 25 +++++++++++++++++--------
1 file changed, 17 insertions(+), 8 deletions(-)
Index: linux/kernel/lockdep.c
===================================================================
--- linux.orig/kernel/lockdep.c
+++ linux/kernel/lockdep.c
@@ -1297,7 +1297,8 @@ out_unlock_set:
if (!subclass || force)
lock->class_cache = class;
- DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
+ if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
+ return NULL;
return class;
}
@@ -1312,7 +1313,8 @@ static inline int lookup_chain_cache(u64
struct list_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
- DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ return 0;
/*
* We can walk it lock-free, because entries only get added
* to the hash:
@@ -1394,7 +1396,9 @@ static void check_chain_key(struct task_
return;
}
id = hlock->class - lock_classes;
- DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS);
+ if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
+ return;
+
if (prev_hlock && (prev_hlock->irq_context !=
hlock->irq_context))
chain_key = 0;
@@ -2210,19 +2214,24 @@ out_calc_hash:
if (!chain_head && ret != 2)
if (!check_prevs_add(curr, hlock))
return 0;
- graph_unlock();
- }
+ } else
+ /* after lookup_chain_cache(): */
+ if (unlikely(!debug_locks))
+ return 0;
+
curr->lockdep_depth++;
check_chain_key(curr);
if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
- debug_locks_off();
+ debug_locks_off_graph_unlock();
printk("BUG: MAX_LOCK_DEPTH too low!\n");
printk("turning off the locking correctness validator.\n");
return 0;
}
+
if (unlikely(curr->lockdep_depth > max_lockdep_depth))
max_lockdep_depth = curr->lockdep_depth;
+ graph_unlock();
return 1;
}
^ permalink raw reply [flat|nested] 5+ messages in thread

* Re: [patch] lockdep: more unlock-on-error fixes
2006-12-18 14:39 ` [patch] lockdep: more unlock-on-error fixes Ingo Molnar
@ 2006-12-19 7:21 ` Jarek Poplawski
2006-12-19 9:50 ` [patch] lockdep: more unlock-on-error fixes, fix Ingo Molnar
1 sibling, 0 replies; 5+ messages in thread
From: Jarek Poplawski @ 2006-12-19 7:21 UTC (permalink / raw)
To: Ingo Molnar; +Cc: linux-kernel, Andrew Morton, Linus Torvalds
On Mon, Dec 18, 2006 at 03:39:36PM +0100, Ingo Molnar wrote:
>
> * Jarek Poplawski <jarkao2@o2.pl> wrote:
>
> > Hello,
> >
> > If any of these proposals should be omitted or separated let me know.
>
> thanks for the fixes, they look good to me. I have reorganized the
> __lock_acquire() changes a bit. Plus i dropped the check_locks_freed()
> changes: there's no reason lockdep should be using 'raw' irq flags
> saving - these functions are not part of the irq-flags tracing code so
> they don't /need/ to be raw.
I'm not 100% convinced - now trace_hardirqs_off/on is
done only for lockdep reasons, so it is like a self-check.
But it's probably a matter of taste.
...
> Index: linux/kernel/lockdep.c
> ===================================================================
> --- linux.orig/kernel/lockdep.c
> +++ linux/kernel/lockdep.c
...
> @@ -2210,19 +2214,24 @@ out_calc_hash:
> if (!chain_head && ret != 2)
> if (!check_prevs_add(curr, hlock))
> return 0;
> - graph_unlock();
> - }
> + } else
> + /* after lookup_chain_cache(): */
> + if (unlikely(!debug_locks))
> + return 0;
> +
> curr->lockdep_depth++;
> check_chain_key(curr);
> if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
> - debug_locks_off();
> + debug_locks_off_graph_unlock();
> printk("BUG: MAX_LOCK_DEPTH too low!\n");
> printk("turning off the locking correctness validator.\n");
> return 0;
> }
> +
> if (unlikely(curr->lockdep_depth > max_lockdep_depth))
> max_lockdep_depth = curr->lockdep_depth;
>
> + graph_unlock();
> return 1;
> }
Sorry but it's not good... There could be no lock
at all here (eg. trylock != 0 || check != 2).
Jarek P.
^ permalink raw reply [flat|nested] 5+ messages in thread

* [patch] lockdep: more unlock-on-error fixes, fix
2006-12-18 14:39 ` [patch] lockdep: more unlock-on-error fixes Ingo Molnar
2006-12-19 7:21 ` Jarek Poplawski
@ 2006-12-19 9:50 ` Ingo Molnar
2006-12-19 13:22 ` Jarek Poplawski
1 sibling, 1 reply; 5+ messages in thread
From: Ingo Molnar @ 2006-12-19 9:50 UTC (permalink / raw)
To: Jarek Poplawski; +Cc: linux-kernel, Andrew Morton, Linus Torvalds
* Ingo Molnar <mingo@elte.hu> wrote:
> An updated patch is below. I also have boot tested it. Andrew, Linus,
> please apply.
this patch introduced a locking bug, which is fixed by the delta patch
below.
Ingo
------------------------>
Subject: [patch] lockdep: more unlock-on-error fixes, fix
From: Ingo Molnar <mingo@elte.hu>
my __acquire_lock() cleanup introduced a locking bug: on SMP
systems we'd release a non-owned graph lock. Fix this by
moving the graph unlock back, and by leaving the max_lockdep_depth
variable update possibly racy. (we don't care, it's just statistics)
also add some minimal debugging code to graph_unlock()/graph_lock(),
which caught this locking bug.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/lockdep.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
Index: linux/kernel/lockdep.c
===================================================================
--- linux.orig/kernel/lockdep.c
+++ linux/kernel/lockdep.c
@@ -70,6 +70,9 @@ static int graph_lock(void)
static inline int graph_unlock(void)
{
+ if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+ return DEBUG_LOCKS_WARN_ON(1);
+
__raw_spin_unlock(&lockdep_lock);
return 0;
}
@@ -716,6 +719,9 @@ find_usage_backwards(struct lock_class *
struct lock_list *entry;
int ret;
+ if (!__raw_spin_is_locked(&lockdep_lock))
+ return DEBUG_LOCKS_WARN_ON(1);
+
if (depth > max_recursion_depth)
max_recursion_depth = depth;
if (depth >= RECURSION_LIMIT)
@@ -2208,6 +2214,7 @@ out_calc_hash:
if (!chain_head && ret != 2)
if (!check_prevs_add(curr, hlock))
return 0;
+ graph_unlock();
} else
/* after lookup_chain_cache(): */
if (unlikely(!debug_locks))
@@ -2216,7 +2223,7 @@ out_calc_hash:
curr->lockdep_depth++;
check_chain_key(curr);
if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
- debug_locks_off_graph_unlock();
+ debug_locks_off();
printk("BUG: MAX_LOCK_DEPTH too low!\n");
printk("turning off the locking correctness validator.\n");
return 0;
@@ -2225,7 +2232,6 @@ out_calc_hash:
if (unlikely(curr->lockdep_depth > max_lockdep_depth))
max_lockdep_depth = curr->lockdep_depth;
- graph_unlock();
return 1;
}
^ permalink raw reply [flat|nested] 5+ messages in thread

* Re: [patch] lockdep: more unlock-on-error fixes, fix
2006-12-19 9:50 ` [patch] lockdep: more unlock-on-error fixes, fix Ingo Molnar
@ 2006-12-19 13:22 ` Jarek Poplawski
0 siblings, 0 replies; 5+ messages in thread
From: Jarek Poplawski @ 2006-12-19 13:22 UTC (permalink / raw)
To: Ingo Molnar; +Cc: linux-kernel, Andrew Morton, Linus Torvalds
On Tue, Dec 19, 2006 at 10:50:47AM +0100, Ingo Molnar wrote:
...
> moving the graph unlock back, and by leaving the max_lockdep_depth
> variable update possibly racy. (we don't care, it's just statistics)
I would agree if it were not the lockdep.
I mean it's like the "father figure"!
> also add some minimal debugging code to graph_unlock()/graph_lock(),
> which caught this locking bug.
>
> Signed-off-by: Ingo Molnar <mingo@elte.hu>
> ---
> kernel/lockdep.c | 10 ++++++++--
> 1 file changed, 8 insertions(+), 2 deletions(-)
>
> Index: linux/kernel/lockdep.c
> ===================================================================
> --- linux.orig/kernel/lockdep.c
> +++ linux/kernel/lockdep.c
> @@ -70,6 +70,9 @@ static int graph_lock(void)
>
> static inline int graph_unlock(void)
> {
> + if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
> + return DEBUG_LOCKS_WARN_ON(1);
> +
> __raw_spin_unlock(&lockdep_lock);
> return 0;
> }
> @@ -716,6 +719,9 @@ find_usage_backwards(struct lock_class *
> struct lock_list *entry;
> int ret;
>
> + if (!__raw_spin_is_locked(&lockdep_lock))
> + return DEBUG_LOCKS_WARN_ON(1);
> +
> if (depth > max_recursion_depth)
> max_recursion_depth = depth;
> if (depth >= RECURSION_LIMIT)
> @@ -2208,6 +2214,7 @@ out_calc_hash:
> if (!chain_head && ret != 2)
> if (!check_prevs_add(curr, hlock))
> return 0;
> + graph_unlock();
> } else
> /* after lookup_chain_cache(): */
> if (unlikely(!debug_locks))
Probably similar changes should be done in
debug_locks_off_graph_unlock() etc.
I think it's getting slightly complicated - it is
hard to say where and when the lock is really held.
Maybe graph_lock needs some rethinking?
My proposal is to do unconditional locking in
graph_lock() and always check its return value e.g.:
if (!graph_lock()) {
graph_unlock();
return 0;
}
It is clear and gives some place for exceptions.
Jarek P.
PS: thanks for this followup_to info!
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2006-12-19 13:21 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2006-12-18 11:56 [PATCH] lockdep: returns after DEBUG_LOCKS_WARN_ONs etc Jarek Poplawski
2006-12-18 14:39 ` [patch] lockdep: more unlock-on-error fixes Ingo Molnar
2006-12-19 7:21 ` Jarek Poplawski
2006-12-19 9:50 ` [patch] lockdep: more unlock-on-error fixes, fix Ingo Molnar
2006-12-19 13:22 ` Jarek Poplawski
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox