public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* 2.6.21-rc5-rt4 inconsistent lock state
@ 2007-03-29 12:38 Michal Piotrowski
  2007-03-29 18:18 ` Michal Piotrowski
  0 siblings, 1 reply; 2+ messages in thread
From: Michal Piotrowski @ 2007-03-29 12:38 UTC (permalink / raw)
  To: Ingo Molnar, Thomas Gleixner, LKML

Hi,

artsd during kde start triggered this

=================================
[ INFO: inconsistent lock state ]
[ 2.6.21-rc5-rt4 #4
---------------------------------
inconsistent {hardirq-on-W} -> {in-hardirq-W} usage.
artsd/3454 [HC1[1]:SC0[0]:HE0:SE1] takes:
 ((raw_spinlock_t *)(&lock->wait_lock)){+-..}, at: [<c0358d1f>] rt_spin_lock_slowlock+0x29/0x1b4
{hardirq-on-W} state was registered at:
  [<c0143a91>] mark_lock+0x73/0x4a9
  [<c0143f19>] mark_held_locks+0x52/0x6f
  [<c0144115>] trace_hardirqs_on+0x116/0x162
  [<c035a616>] __spin_unlock_irqrestore+0x54/0x70
  [<c014a2a4>] task_blocks_on_rt_mutex+0x1d6/0x226
  [<c0358dc5>] rt_spin_lock_slowlock+0xcf/0x1b4
  [<c035959b>] rt_spin_lock+0x13/0x4b
  [<c035804f>] wait_for_completion+0x2e/0xab
  [<c012048d>] set_cpus_allowed+0x88/0xab
  [<c012055b>] measure_one+0xab/0x184
  [<c0120be5>] build_sched_domains+0x5b1/0xc19
  [<c0121272>] arch_init_sched_domains+0x25/0x27
  [<c04dccec>] sched_init_smp+0x1c/0x6a
  [<c04d0590>] init+0x121/0x361
  [<c0104ff7>] kernel_thread_helper+0x7/0x10
  [<ffffffff>] 0xffffffff
irq event stamp: 116668
hardirqs last  enabled at (116667): [<c035a616>] __spin_unlock_irqrestore+0x54/0x70
hardirqs last disabled at (116668): [<c0104de9>] apic_timer_interrupt+0x29/0x38
softirqs last  enabled at (0): [<c0123b51>] copy_process+0x542/0x1440
softirqs last disabled at (0): [<00000000>] 0x0

l *0xc0358d1f
0xc0358d1f is in rt_spin_lock_slowlock (kernel/rtmutex.c:640).
635             unsigned long saved_state, state, flags;
636
637             debug_rt_mutex_init_waiter(&waiter);
638             waiter.task = NULL;
639
640             spin_lock_irqsave(&lock->wait_lock, flags);
641             init_lists(lock);
642
643             /* Try to acquire the lock again: */
644             if (try_to_take_rt_mutex(lock)) {

 l *0xc035a616
0xc035a616 is in __spin_unlock_irqrestore (include/asm/irqflags.h:33).
28              return flags;
29      }
30
31      static inline void raw_local_irq_restore(unsigned long flags)
32      {
33              __asm__ __volatile__(
34                      "pushl %0 ; popfl"
35                      : /* no output */
36                      :"g" (flags)
37                      :"memory", "cc"

l *0xc0104de9
0xc0104de9 is at include/asm/bitops.h:246.
241     static int test_bit(int nr, const volatile void * addr);
242     #endif
243
244     static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
245     {
246             return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
247     }
248
249     static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
250     {

 l *0xc0123b51
0xc0123b51 is in copy_process (kernel/fork.c:1129).
1124            p->hardirq_enable_ip = 0;
1125            p->hardirq_enable_event = 0;
1126            p->hardirq_disable_ip = _THIS_IP_;
1127            p->hardirq_disable_event = 0;
1128            p->softirqs_enabled = 1;
1129            p->softirq_enable_ip = _THIS_IP_;
1130            p->softirq_enable_event = 0;
1131            p->softirq_disable_ip = 0;
1132            p->softirq_disable_event = 0;
1133            p->hardirq_context = 0;


other info that might help us debug this:
1 lock held by artsd/3454:
 #0:  (&tu->tread_sem){--..}, at: [<f98abe00>] snd_timer_user_ioctl+0x618/0xb0d [snd_timer]

stack backtrace:
 [<c0105470>] dump_trace+0x78/0x21a
 [<c0105647>] show_trace_log_lvl+0x35/0x54
 [<c0105dcc>] show_trace+0x2c/0x2e
 [<c0105e93>] dump_stack+0x29/0x2b
 [<c014320b>] print_usage_bug+0x14d/0x157
 [<c0143ace>] mark_lock+0xb0/0x4a9
 [<c014498c>] __lock_acquire+0x3cf/0xc36
 [<c0145261>] lock_acquire+0x6e/0x86
 [<c035a10c>] __spin_lock_irqsave+0x55/0x65
 [<c0358d1f>] rt_spin_lock_slowlock+0x29/0x1b4
 [<c035959b>] rt_spin_lock+0x13/0x4b
 [<c0276d47>] serial8250_console_write+0x4e/0x155
 [<c0125c2f>] __call_console_drivers+0x73/0x88
 [<c0125cb8>] _call_console_drivers+0x74/0x7a
 [<c0125f4e>] release_console_sem+0x11e/0x1ad
 [<c0126644>] vprintk+0x2dc/0x32b
 [<c01266b3>] printk+0x20/0x22
 [<c011e39d>] task_running_tick+0x401/0x450
 [<c012180f>] scheduler_tick+0x188/0x1e3
 [<c012f964>] update_process_times+0x40/0x6d
 [<c01416c7>] tick_sched_timer+0x76/0xb6
 [<c013d5b6>] hrtimer_interrupt+0x15d/0x205
 [<c01161c5>] smp_apic_timer_interrupt+0x94/0xa7
 [<c0104df3>] apic_timer_interrupt+0x33/0x38
 [<c035a618>] __spin_unlock_irqrestore+0x56/0x70
 [<c014a325>] rt_mutex_adjust_prio+0x31/0x37
 [<c0358c36>] rt_spin_lock_slowunlock+0x6d/0x73
 [<c0359602>] rt_spin_unlock+0x2f/0x35
 [<c0182d71>] kfree+0x9d/0xa5
 [<f98abea9>] snd_timer_user_ioctl+0x6c1/0xb0d [snd_timer]
 [<c0193d5e>] do_ioctl+0x32/0x7f
 [<c019403c>] vfs_ioctl+0x291/0x2a4
 [<c01940af>] sys_ioctl+0x60/0x79
 [<c010432d>] syscall_call+0x7/0xb
 [<b7f28410>] 0xb7f28410
 =======================
---------------------------
| preempt count: 00010001 ]
| 1-level deep critical section nesting:
----------------------------------------
.. [<c035a0da>] .... __spin_lock_irqsave+0x23/0x65
.....[<c0358d1f>] ..   ( <= rt_spin_lock_slowlock+0x29/0x1b4)

 l *0xc035a0da
0xc035a0da is in __spin_lock_irqsave (kernel/spinlock.c:122).
117     {
118             unsigned long flags;
119
120             local_irq_save(flags);
121             preempt_disable();
122             spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
123             /*
124              * On lockdep we dont want the hand-coded irq-enable of
125              * _raw_spin_lock_flags() code, because lockdep assumes
126              * that interrupts are not re-enabled during lock-acquire:

l *0xc0358d1f
0xc0358d1f is in rt_spin_lock_slowlock (kernel/rtmutex.c:640).
635             unsigned long saved_state, state, flags;
636
637             debug_rt_mutex_init_waiter(&waiter);
638             waiter.task = NULL;
639
640             spin_lock_irqsave(&lock->wait_lock, flags);
641             init_lists(lock);
642
643             /* Try to acquire the lock again: */
644             if (try_to_take_rt_mutex(lock)) {

http://www.stardust.webpages.pl/files/tbf/bitis-gabonica/2.6.21-rc5-rt4/rt-config
http://www.stardust.webpages.pl/files/tbf/bitis-gabonica/2.6.21-rc5-rt4/rt-dmesg

Regards,
Michal

-- 
Michal K. K. Piotrowski
LTG - Linux Testers Group (PL)
(http://www.stardust.webpages.pl/ltg/)
LTG - Linux Testers Group (EN)
(http://www.stardust.webpages.pl/linux_testers_group_en/)

^ permalink raw reply	[flat|nested] 2+ messages in thread

* Re: 2.6.21-rc5-rt4 inconsistent lock state
  2007-03-29 12:38 2.6.21-rc5-rt4 inconsistent lock state Michal Piotrowski
@ 2007-03-29 18:18 ` Michal Piotrowski
  0 siblings, 0 replies; 2+ messages in thread
From: Michal Piotrowski @ 2007-03-29 18:18 UTC (permalink / raw)
  To: Michal Piotrowski; +Cc: Ingo Molnar, Thomas Gleixner, LKML

Michal Piotrowski napisał(a):
> Hi,
[..]
> http://www.stardust.webpages.pl/files/tbf/bitis-gabonica/2.6.21-rc5-rt4/rt-config
> http://www.stardust.webpages.pl/files/tbf/bitis-gabonica/2.6.21-rc5-rt4/rt-dmesg
> 

I just fixed an isic build in AT :)

BUG: isic:11396 task might have lost a preemption check!
 [<c0105470>] dump_trace+0x78/0x21a
 [<c0105647>] show_trace_log_lvl+0x35/0x54
 [<c0105dcc>] show_trace+0x2c/0x2e
 [<c0105e93>] dump_stack+0x29/0x2b
 [<c0121d64>] preempt_enable_no_resched+0x5c/0x5e
 [<c0218390>] debug_smp_processor_id+0xb0/0xb8
 [<fd98a83f>] nf_conntrack_in+0x39c/0x468 [nf_conntrack]
 [<fd999316>] ipv4_conntrack_local+0x61/0x67 [nf_conntrack_ipv4]
 [<c031c494>] nf_iterate+0x40/0x71
 [<c031c61a>] nf_hook_slow+0x5c/0xd7
 [<c033f066>] raw_sendmsg+0x518/0x6ee
 [<c0346819>] inet_sendmsg+0x4b/0x55
 [<c030109b>] sock_sendmsg+0xf7/0x114
 [<c0301b34>] sys_sendto+0xe5/0x105
 [<c0302521>] sys_socketcall+0x17e/0x254
 [<c010432d>] syscall_call+0x7/0xb
 [<b7f0c410>] 0xb7f0c410
 =======================
---------------------------
| preempt count: 00000000 ]
| 0-level deep critical section nesting:
----------------------------------------

BUG: using smp_processor_id() in preemptible [00000000] code: isic/16471
caller is nf_conntrack_in+0xd0/0x468 [nf_conntrack]
 [<c0105470>] dump_trace+0x78/0x21a
 [<c0105647>] show_trace_log_lvl+0x35/0x54
 [<c0105dcc>] show_trace+0x2c/0x2e
 [<c0105e93>] dump_stack+0x29/0x2b
 [<c021838b>] debug_smp_processor_id+0xab/0xb8
 [<fd98a573>] nf_conntrack_in+0xd0/0x468 [nf_conntrack]
 [<fd999316>] ipv4_conntrack_local+0x61/0x67 [nf_conntrack_ipv4]
 [<c031c494>] nf_iterate+0x40/0x71
 [<c031c61a>] nf_hook_slow+0x5c/0xd7
 [<c033f066>] raw_sendmsg+0x518/0x6ee
 [<c0346819>] inet_sendmsg+0x4b/0x55
 [<c030109b>] sock_sendmsg+0xf7/0x114
 [<c0301b34>] sys_sendto+0xe5/0x105
 [<c0302521>] sys_socketcall+0x17e/0x254
 [<c010432d>] syscall_call+0x7/0xb
 [<b7f01410>] 0xb7f01410
 =======================
---------------------------
| preempt count: 00000001 ]
| 1-level deep critical section nesting:
----------------------------------------
.. [<c0359d9e>] .... __spin_lock+0x1a/0x59
.....[<c016321a>] ..   ( <= handle_fasteoi_irq+0x27/0xdf)

 l *0xc0359d9e
0xc0359d9e is in __spin_lock (kernel/spinlock.c:218).
213     EXPORT_SYMBOL(__write_lock_bh);
214
215     void __lockfunc __spin_lock(raw_spinlock_t *lock)
216     {
217             preempt_disable();
218             spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
219             _raw_spin_lock(lock);
220     }
221
222     EXPORT_SYMBOL(__spin_lock);

l *0xc016321a
0xc016321a is in handle_fasteoi_irq (kernel/irq/chip.c:405).
400             struct irqaction *action;
401             irqreturn_t action_ret;
402
403             spin_lock(&desc->lock);
404
405             if (unlikely(desc->status & IRQ_INPROGRESS))
406                     goto out;
407
408             desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
409             kstat_cpu(cpu).irqs[irq]++;


BUG: using smp_processor_id() in preemptible [00000000] code: isic/16471
caller is nf_conntrack_in+0x43d/0x468 [nf_conntrack]
 [<c0105470>] dump_trace+0x78/0x21a
 [<c0105647>] show_trace_log_lvl+0x35/0x54
 [<c0105dcc>] show_trace+0x2c/0x2e
 [<c0105e93>] dump_stack+0x29/0x2b
 [<c021838b>] debug_smp_processor_id+0xab/0xb8
 [<fd98a8e0>] nf_conntrack_in+0x43d/0x468 [nf_conntrack]
 [<fd999316>] ipv4_conntrack_local+0x61/0x67 [nf_conntrack_ipv4]
 [<c031c494>] nf_iterate+0x40/0x71
 [<c031c61a>] nf_hook_slow+0x5c/0xd7
 [<c033f066>] raw_sendmsg+0x518/0x6ee
 [<c0346819>] inet_sendmsg+0x4b/0x55
 [<c030109b>] sock_sendmsg+0xf7/0x114
 [<c0301b34>] sys_sendto+0xe5/0x105
 [<c0302521>] sys_socketcall+0x17e/0x254
 [<c010432d>] syscall_call+0x7/0xb
 [<b7f01410>] 0xb7f01410
 =======================
---------------------------
| preempt count: 00000001 ]
| 1-level deep critical section nesting:
----------------------------------------
.. [<c0218336>] .... debug_smp_processor_id+0x56/0xb8
.....[<fd98a8e0>] ..   ( <= nf_conntrack_in+0x43d/0x468 [nf_conntrack])

 l *0xc0218336
0xc0218336 is in debug_smp_processor_id (lib/smp_processor_id.c:42).
37              /*
38               * Avoid recursion:
39               */
40              preempt_disable();
41
42              if (!printk_ratelimit())
43                      goto out_enable;
44
45              printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count()-1, current->comm, current->pid);
46              print_symbol("caller is %s\n", (long)__builtin_return_address(0));

http://www.stardust.webpages.pl/files/tbf/bitis-gabonica/2.6.21-rc5-rt4/rt-dmesg2

Regards,
Michal

-- 
Michal K. K. Piotrowski
LTG - Linux Testers Group (PL)
(http://www.stardust.webpages.pl/ltg/)
LTG - Linux Testers Group (EN)
(http://www.stardust.webpages.pl/linux_testers_group_en/)

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2007-03-29 18:18 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-03-29 12:38 2.6.21-rc5-rt4 inconsistent lock state Michal Piotrowski
2007-03-29 18:18 ` Michal Piotrowski

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox