* [patch 00/23] locking: name space cleanup and -rt spinlock annotation
@ 2009-12-06 18:01 Thomas Gleixner
2009-12-06 18:01 ` [patch 01/23] locking: Reorder functions in spinlock.c Thomas Gleixner
` (22 more replies)
0 siblings, 23 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:01 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
PREEMPT_RT needs to annotate spinlocks which cannot be converted to
sleeping locks.
The best name, raw_spinlock, is already taken by the low level
architecture implementations. We tried to come up with a good name for
those locks, but none of the naming suggestions was really great.
Linus finally suggested cleaning up the name space instead and renaming
the low level locks in the architecture code to arch_spinlock.
The following patch series contains:
- the name space cleanup
- the new raw_spinlock implementation
- annotation of the most important locks
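To make the resulting naming scheme concrete, a rough sketch (the lock
names below are made up for illustration, and it is assumed that the new
raw_spinlock implementation provides a DEFINE_RAW_SPINLOCK() initializer
analogous to DEFINE_SPINLOCK()):

	/*
	 * arch_spinlock_t - low level architecture implementation,
	 *                   formerly called raw_spinlock_t
	 * raw_spinlock_t  - annotated spinning lock which must not be
	 *                   converted to a sleeping lock on -rt
	 * spinlock_t      - normal spinlock, which PREEMPT_RT may
	 *                   convert to a sleeping lock
	 */
	DEFINE_RAW_SPINLOCK(hypothetical_core_lock);	/* must keep spinning on -rt */
	DEFINE_SPINLOCK(hypothetical_driver_lock);	/* may become a sleeping lock on -rt */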
A git version of the series can be found here:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip rt/locking
Thanks,
tglx
* [patch 01/23] locking: Reorder functions in spinlock.c
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
@ 2009-12-06 18:01 ` Thomas Gleixner
2009-12-06 18:01 ` Thomas Gleixner
2009-12-06 18:01 ` [patch 02/23] locking: Split rwlock from spinlock headers Thomas Gleixner
` (21 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:01 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-reorder-functions-in-spinlock-c.patch --]
[-- Type: text/plain, Size: 8973 bytes --]
Separate the spin_lock and rw_lock functions. Preempt-RT needs to
exclude the rw_lock functions from being compiled. The reordering
allows doing that with a single #ifdef.
No functional change.
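To illustrate what the reordering buys, a sketch of the layout it
enables; the CONFIG_PREEMPT_RT guard shown here is an assumption about
the later -rt patches and is not part of this patch:

	/* kernel/spinlock.c after the reordering (sketch): */

	/* ... all _spin_lock()/_spin_unlock() style functions ... */

	#ifndef CONFIG_PREEMPT_RT	/* hypothetical guard used by later -rt patches */
	/* ... all _read_*() and _write_*() rwlock functions, now contiguous ... */
	#endif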
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/spinlock.c | 246 +++++++++++++++++++++++++++---------------------------
1 file changed, 123 insertions(+), 123 deletions(-)
Index: linux-2.6-tip/kernel/spinlock.c
===================================================================
--- linux-2.6-tip.orig/kernel/spinlock.c
+++ linux-2.6-tip/kernel/spinlock.c
@@ -113,41 +113,6 @@ BUILD_LOCK_OPS(write, rwlock);
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
-{
- preempt_disable();
- spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-EXPORT_SYMBOL(_spin_lock_nested);
-
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
- int subclass)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
- _raw_spin_lock_flags, &flags);
- return flags;
-}
-EXPORT_SYMBOL(_spin_lock_irqsave_nested);
-
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
- struct lockdep_map *nest_lock)
-{
- preempt_disable();
- spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-EXPORT_SYMBOL(_spin_lock_nest_lock);
-
-#endif
-
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _spin_trylock(spinlock_t *lock)
{
@@ -156,28 +121,20 @@ int __lockfunc _spin_trylock(spinlock_t
EXPORT_SYMBOL(_spin_trylock);
#endif
-#ifndef CONFIG_INLINE_READ_TRYLOCK
-int __lockfunc _read_trylock(rwlock_t *lock)
-{
- return __read_trylock(lock);
-}
-EXPORT_SYMBOL(_read_trylock);
-#endif
-
-#ifndef CONFIG_INLINE_WRITE_TRYLOCK
-int __lockfunc _write_trylock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
+int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
- return __write_trylock(lock);
+ return __spin_trylock_bh(lock);
}
-EXPORT_SYMBOL(_write_trylock);
+EXPORT_SYMBOL(_spin_trylock_bh);
#endif
-#ifndef CONFIG_INLINE_READ_LOCK
-void __lockfunc _read_lock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_LOCK
+void __lockfunc _spin_lock(spinlock_t *lock)
{
- __read_lock(lock);
+ __spin_lock(lock);
}
-EXPORT_SYMBOL(_read_lock);
+EXPORT_SYMBOL(_spin_lock);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
@@ -204,84 +161,76 @@ void __lockfunc _spin_lock_bh(spinlock_t
EXPORT_SYMBOL(_spin_lock_bh);
#endif
-#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
-{
- return __read_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_read_lock_irqsave);
-#endif
-
-#ifndef CONFIG_INLINE_READ_LOCK_IRQ
-void __lockfunc _read_lock_irq(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK
+void __lockfunc _spin_unlock(spinlock_t *lock)
{
- __read_lock_irq(lock);
+ __spin_unlock(lock);
}
-EXPORT_SYMBOL(_read_lock_irq);
+EXPORT_SYMBOL(_spin_unlock);
#endif
-#ifndef CONFIG_INLINE_READ_LOCK_BH
-void __lockfunc _read_lock_bh(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
+void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
- __read_lock_bh(lock);
+ __spin_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(_read_lock_bh);
+EXPORT_SYMBOL(_spin_unlock_irqrestore);
#endif
-#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
+void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
- return __write_lock_irqsave(lock);
+ __spin_unlock_irq(lock);
}
-EXPORT_SYMBOL(_write_lock_irqsave);
+EXPORT_SYMBOL(_spin_unlock_irq);
#endif
-#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
-void __lockfunc _write_lock_irq(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
+void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
- __write_lock_irq(lock);
+ __spin_unlock_bh(lock);
}
-EXPORT_SYMBOL(_write_lock_irq);
+EXPORT_SYMBOL(_spin_unlock_bh);
#endif
-#ifndef CONFIG_INLINE_WRITE_LOCK_BH
-void __lockfunc _write_lock_bh(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_TRYLOCK
+int __lockfunc _read_trylock(rwlock_t *lock)
{
- __write_lock_bh(lock);
+ return __read_trylock(lock);
}
-EXPORT_SYMBOL(_write_lock_bh);
+EXPORT_SYMBOL(_read_trylock);
#endif
-#ifndef CONFIG_INLINE_SPIN_LOCK
-void __lockfunc _spin_lock(spinlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK
+void __lockfunc _read_lock(rwlock_t *lock)
{
- __spin_lock(lock);
+ __read_lock(lock);
}
-EXPORT_SYMBOL(_spin_lock);
+EXPORT_SYMBOL(_read_lock);
#endif
-#ifndef CONFIG_INLINE_WRITE_LOCK
-void __lockfunc _write_lock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
- __write_lock(lock);
+ return __read_lock_irqsave(lock);
}
-EXPORT_SYMBOL(_write_lock);
+EXPORT_SYMBOL(_read_lock_irqsave);
#endif
-#ifndef CONFIG_INLINE_SPIN_UNLOCK
-void __lockfunc _spin_unlock(spinlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_IRQ
+void __lockfunc _read_lock_irq(rwlock_t *lock)
{
- __spin_unlock(lock);
+ __read_lock_irq(lock);
}
-EXPORT_SYMBOL(_spin_unlock);
+EXPORT_SYMBOL(_read_lock_irq);
#endif
-#ifndef CONFIG_INLINE_WRITE_UNLOCK
-void __lockfunc _write_unlock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_BH
+void __lockfunc _read_lock_bh(rwlock_t *lock)
{
- __write_unlock(lock);
+ __read_lock_bh(lock);
}
-EXPORT_SYMBOL(_write_unlock);
+EXPORT_SYMBOL(_read_lock_bh);
#endif
#ifndef CONFIG_INLINE_READ_UNLOCK
@@ -292,30 +241,6 @@ void __lockfunc _read_unlock(rwlock_t *l
EXPORT_SYMBOL(_read_unlock);
#endif
-#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
-{
- __spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(_spin_unlock_irqrestore);
-#endif
-
-#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)
-{
- __spin_unlock_irq(lock);
-}
-EXPORT_SYMBOL(_spin_unlock_irq);
-#endif
-
-#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)
-{
- __spin_unlock_bh(lock);
-}
-EXPORT_SYMBOL(_spin_unlock_bh);
-#endif
-
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
@@ -340,6 +265,54 @@ void __lockfunc _read_unlock_bh(rwlock_t
EXPORT_SYMBOL(_read_unlock_bh);
#endif
+#ifndef CONFIG_INLINE_WRITE_TRYLOCK
+int __lockfunc _write_trylock(rwlock_t *lock)
+{
+ return __write_trylock(lock);
+}
+EXPORT_SYMBOL(_write_trylock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK
+void __lockfunc _write_lock(rwlock_t *lock)
+{
+ __write_lock(lock);
+}
+EXPORT_SYMBOL(_write_lock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+{
+ return __write_lock_irqsave(lock);
+}
+EXPORT_SYMBOL(_write_lock_irqsave);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
+void __lockfunc _write_lock_irq(rwlock_t *lock)
+{
+ __write_lock_irq(lock);
+}
+EXPORT_SYMBOL(_write_lock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_BH
+void __lockfunc _write_lock_bh(rwlock_t *lock)
+{
+ __write_lock_bh(lock);
+}
+EXPORT_SYMBOL(_write_lock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_UNLOCK
+void __lockfunc _write_unlock(rwlock_t *lock)
+{
+ __write_unlock(lock);
+}
+EXPORT_SYMBOL(_write_unlock);
+#endif
+
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
@@ -364,12 +337,39 @@ void __lockfunc _write_unlock_bh(rwlock_
EXPORT_SYMBOL(_write_unlock_bh);
#endif
-#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
-int __lockfunc _spin_trylock_bh(spinlock_t *lock)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
- return __spin_trylock_bh(lock);
+ preempt_disable();
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_trylock_bh);
+EXPORT_SYMBOL(_spin_lock_nested);
+
+unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
+ int subclass)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
+ _raw_spin_lock_flags, &flags);
+ return flags;
+}
+EXPORT_SYMBOL(_spin_lock_irqsave_nested);
+
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+ struct lockdep_map *nest_lock)
+{
+ preempt_disable();
+ spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+EXPORT_SYMBOL(_spin_lock_nest_lock);
+
#endif
notrace int in_lock_functions(unsigned long addr)
* [patch 02/23] locking: Split rwlock from spinlock headers
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
2009-12-06 18:01 ` [patch 01/23] locking: Reorder functions in spinlock.c Thomas Gleixner
@ 2009-12-06 18:01 ` Thomas Gleixner
2009-12-07 17:28 ` Arnd Bergmann
2009-12-06 18:02 ` [patch 03/23] locking: Separate rwlock api from spinlock api Thomas Gleixner
` (20 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:01 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-split-rwlock-h-from-spinlock-h.patch --]
[-- Type: text/plain, Size: 15735 bytes --]
Move the rwlock defines and inlines into separate header files. This
makes the selection for -rt easier.
No functional change.
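The resulting header layering, sketched from the hunks below:

	/* include/linux/spinlock_types.h */
	/* ... spinlock_t, __SPIN_LOCK_UNLOCKED(), DEFINE_SPINLOCK() ... */
	#include <linux/rwlock_types.h>   /* rwlock_t, __RW_LOCK_UNLOCKED(), DEFINE_RWLOCK() */

	/* include/linux/spinlock.h */
	/* ... spin_lock() family ... */
	#include <linux/rwlock.h>         /* read_lock()/write_lock() family; #errors if included directly */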
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/rwlock.h | 127 +++++++++++++++++++++++++++++++++++++++++
include/linux/rwlock_types.h | 56 ++++++++++++++++++
include/linux/spinlock.h | 100 ++------------------------------
include/linux/spinlock_types.h | 43 +------------
4 files changed, 197 insertions(+), 129 deletions(-)
Index: linux-2.6-tip/include/linux/rwlock.h
===================================================================
--- /dev/null
+++ linux-2.6-tip/include/linux/rwlock.h
@@ -0,0 +1,127 @@
+#ifndef __LINUX_RWLOCK_H
+#define __LINUX_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * rwlock related methods
+ *
+ * split out from spinlock.h
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define rwlock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_init((lock), #lock, &__key); \
+} while (0)
+#else
+# define rwlock_init(lock) \
+ do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void _raw_read_lock(rwlock_t *lock);
+#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
+ extern int _raw_read_trylock(rwlock_t *lock);
+ extern void _raw_read_unlock(rwlock_t *lock);
+ extern void _raw_write_lock(rwlock_t *lock);
+#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
+ extern int _raw_write_trylock(rwlock_t *lock);
+ extern void _raw_write_unlock(rwlock_t *lock);
+#else
+# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_read_lock_flags(lock, flags) \
+ __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
+# define _raw_write_lock_flags(lock, flags) \
+ __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
+#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
+
+#define write_lock(lock) _write_lock(lock)
+#define read_lock(lock) _read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _read_lock_irqsave(lock); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _write_lock_irqsave(lock); \
+ } while (0)
+
+#else
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _read_lock_irqsave(lock, flags); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _write_lock_irqsave(lock, flags); \
+ } while (0)
+
+#endif
+
+#define read_lock_irq(lock) _read_lock_irq(lock)
+#define read_lock_bh(lock) _read_lock_bh(lock)
+#define write_lock_irq(lock) _write_lock_irq(lock)
+#define write_lock_bh(lock) _write_lock_bh(lock)
+#define read_unlock(lock) _read_unlock(lock)
+#define write_unlock(lock) _write_unlock(lock)
+#define read_unlock_irq(lock) _read_unlock_irq(lock)
+#define write_unlock_irq(lock) _write_unlock_irq(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _read_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define read_unlock_bh(lock) _read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _write_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define write_unlock_bh(lock) _write_unlock_bh(lock)
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ write_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+#endif /* __LINUX_RWLOCK_H */
Index: linux-2.6-tip/include/linux/rwlock_types.h
===================================================================
--- /dev/null
+++ linux-2.6-tip/include/linux/rwlock_types.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_RWLOCK_TYPES_H
+#define __LINUX_RWLOCK_TYPES_H
+
+/*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+typedef struct {
+ raw_rwlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC 0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ .magic = RWLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1, \
+ RW_DEP_MAP_INIT(lockname) }
+#else
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ RW_DEP_MAP_INIT(lockname) }
+#endif
+
+/*
+ * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ *
+ * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
+ */
+#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_RWLOCK_TYPES_H */
Index: linux-2.6-tip/include/linux/spinlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock.h
+++ linux-2.6-tip/include/linux/spinlock.h
@@ -103,20 +103,6 @@ do { \
do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __rwlock_init(rwlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define rwlock_init(lock) \
-do { \
- static struct lock_class_key __key; \
- \
- __rwlock_init((lock), #lock, &__key); \
-} while (0)
-#else
-# define rwlock_init(lock) \
- do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
-#endif
-
#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
@@ -146,43 +132,21 @@ static inline void smp_mb__after_lock(vo
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_unlock(spinlock_t *lock);
- extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
- extern int _raw_read_trylock(rwlock_t *lock);
- extern void _raw_read_unlock(rwlock_t *lock);
- extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
- extern int _raw_write_trylock(rwlock_t *lock);
- extern void _raw_write_unlock(rwlock_t *lock);
#else
# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
- __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
- __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
#endif
-#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
-
/*
- * Define the various spin_lock and rw_lock methods. Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
- * methods are defined as nops in the case they are not required.
+ * Define the various spin_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * various methods are defined as nops in the case they are not
+ * required.
*/
#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
-#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
#define spin_lock(lock) _spin_lock(lock)
@@ -198,9 +162,6 @@ static inline void smp_mb__after_lock(vo
# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
#endif
-#define write_lock(lock) _write_lock(lock)
-#define read_lock(lock) _read_lock(lock)
-
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define spin_lock_irqsave(lock, flags) \
@@ -208,16 +169,6 @@ static inline void smp_mb__after_lock(vo
typecheck(unsigned long, flags); \
flags = _spin_lock_irqsave(lock); \
} while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _read_lock_irqsave(lock); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _write_lock_irqsave(lock); \
- } while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define spin_lock_irqsave_nested(lock, flags, subclass) \
@@ -240,16 +191,7 @@ static inline void smp_mb__after_lock(vo
typecheck(unsigned long, flags); \
_spin_lock_irqsave(lock, flags); \
} while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_lock_irqsave(lock, flags); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _write_lock_irqsave(lock, flags); \
- } while (0)
+
#define spin_lock_irqsave_nested(lock, flags, subclass) \
spin_lock_irqsave(lock, flags)
@@ -257,16 +199,8 @@ static inline void smp_mb__after_lock(vo
#define spin_lock_irq(lock) _spin_lock_irq(lock)
#define spin_lock_bh(lock) _spin_lock_bh(lock)
-#define read_lock_irq(lock) _read_lock_irq(lock)
-#define read_lock_bh(lock) _read_lock_bh(lock)
-#define write_lock_irq(lock) _write_lock_irq(lock)
-#define write_lock_bh(lock) _write_lock_bh(lock)
#define spin_unlock(lock) _spin_unlock(lock)
-#define read_unlock(lock) _read_unlock(lock)
-#define write_unlock(lock) _write_unlock(lock)
#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
-#define read_unlock_irq(lock) _read_unlock_irq(lock)
-#define write_unlock_irq(lock) _write_unlock_irq(lock)
#define spin_unlock_irqrestore(lock, flags) \
do { \
@@ -275,20 +209,6 @@ static inline void smp_mb__after_lock(vo
} while (0)
#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
-#define read_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_unlock_irqrestore(lock, flags); \
- } while (0)
-#define read_unlock_bh(lock) _read_unlock_bh(lock)
-
-#define write_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _write_unlock_irqrestore(lock, flags); \
- } while (0)
-#define write_unlock_bh(lock) _write_unlock_bh(lock)
-
#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
#define spin_trylock_irq(lock) \
@@ -305,13 +225,6 @@ static inline void smp_mb__after_lock(vo
1 : ({ local_irq_restore(flags); 0; }); \
})
-#define write_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- write_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
-})
-
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
@@ -335,6 +248,9 @@ extern int _atomic_dec_and_lock(atomic_t
*/
#define spin_can_lock(lock) (!spin_is_locked(lock))
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
*/
Index: linux-2.6-tip/include/linux/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_types.h
+++ linux-2.6-tip/include/linux/spinlock_types.h
@@ -33,22 +33,6 @@ typedef struct {
#define SPINLOCK_MAGIC 0xdead4ead
-typedef struct {
- raw_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
- unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC 0xdeaf1eed
-
#define SPINLOCK_OWNER_INIT ((void *)-1L)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -57,12 +41,6 @@ typedef struct {
# define SPIN_DEP_MAP_INIT(lockname)
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define RW_DEP_MAP_INIT(lockname)
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK
# define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
@@ -70,31 +48,22 @@ typedef struct {
.owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \
SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- .magic = RWLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- RW_DEP_MAP_INIT(lockname) }
#else
# define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- RW_DEP_MAP_INIT(lockname) }
#endif
/*
- * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
- * are hence deprecated.
- * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
- * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
+ * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
+ * appropriate.
*/
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
-#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>
#endif /* __LINUX_SPINLOCK_TYPES_H */
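As a minimal usage sketch of the API now collected in rwlock.h and
rwlock_types.h (hypothetical caller, not part of the patch):

	static DEFINE_RWLOCK(example_lock);	/* hypothetical lock */

	unsigned long flags;

	read_lock_irqsave(&example_lock, flags);
	/* ... read side critical section ... */
	read_unlock_irqrestore(&example_lock, flags);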
* [patch 03/23] locking: Separate rwlock api from spinlock api
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
2009-12-06 18:01 ` [patch 01/23] locking: Reorder functions in spinlock.c Thomas Gleixner
2009-12-06 18:01 ` [patch 02/23] locking: Split rwlock from spinlock headers Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` [patch 04/23] locking: Convert raw_spinlock to arch_spinlock Thomas Gleixner
` (19 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-split-rwlock-api-from-spinlock-api.patch --]
[-- Type: text/plain, Size: 19161 bytes --]
Move the rwlock SMP API defines and functions into a separate header
file. This makes the -rt selection simpler and less intrusive.
No functional change.
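The moved declarations keep the existing pattern: each out-of-line
function declared in the new header is redirected to its __-prefixed
inline counterpart when the corresponding CONFIG_INLINE_* option is
set, e.g. (excerpted from the hunks below):

	void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);

	#ifdef CONFIG_INLINE_READ_LOCK
	#define _read_lock(lock)	__read_lock(lock)
	#endif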
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/rwlock_api_smp.h | 277 +++++++++++++++++++++++++++++++++++++++
include/linux/spinlock_api_smp.h | 252 -----------------------------------
2 files changed, 280 insertions(+), 249 deletions(-)
Index: linux-2.6-tip/include/linux/rwlock_api_smp.h
===================================================================
--- /dev/null
+++ linux-2.6-tip/include/linux/rwlock_api_smp.h
@@ -0,0 +1,277 @@
+#ifndef __LINUX_RWLOCK_API_SMP_H
+#define __LINUX_RWLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/rwlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
+void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+int __lockfunc _read_trylock(rwlock_t *lock);
+int __lockfunc _write_trylock(rwlock_t *lock);
+void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+
+#ifdef CONFIG_INLINE_READ_LOCK
+#define _read_lock(lock) __read_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK
+#define _write_lock(lock) __write_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_BH
+#define _read_lock_bh(lock) __read_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_BH
+#define _write_lock_bh(lock) __write_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQ
+#define _read_lock_irq(lock) __read_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
+#define _write_lock_irq(lock) __write_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
+#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_TRYLOCK
+#define _read_trylock(lock) __read_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_TRYLOCK
+#define _write_trylock(lock) __write_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK
+#define _read_unlock(lock) __read_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK
+#define _write_unlock(lock) __write_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_BH
+#define _read_unlock_bh(lock) __read_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
+#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
+#define _read_unlock_irq(lock) __read_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
+#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
+#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#endif
+
+static inline int __read_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (_raw_read_trylock(lock)) {
+ rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static inline int __write_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (_raw_write_trylock(lock)) {
+ rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __read_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
+ _raw_read_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline void __read_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
+ _raw_write_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+static inline void __write_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+static inline void __write_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+#endif /* CONFIG_PREEMPT */
+
+static inline void __write_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __read_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __read_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __read_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __write_unlock_irqrestore(rwlock_t *lock,
+ unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __write_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __write_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+#endif /* __LINUX_RWLOCK_API_SMP_H */
Index: linux-2.6-tip/include/linux/spinlock_api_smp.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_api_smp.h
+++ linux-2.6-tip/include/linux/spinlock_api_smp.h
@@ -24,102 +24,41 @@ void __lockfunc _spin_lock_nested(spinlo
__acquires(lock);
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
__acquires(lock);
-void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
+
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
__acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
__acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
int __lockfunc _spin_trylock_bh(spinlock_t *lock);
void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
__releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _spin_lock(lock) __spin_lock(lock)
#endif
-#ifdef CONFIG_INLINE_READ_LOCK
-#define _read_lock(lock) __read_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK
-#define _write_lock(lock) __write_lock(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif
-#ifdef CONFIG_INLINE_READ_LOCK_BH
-#define _read_lock_bh(lock) __read_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_BH
-#define _write_lock_bh(lock) __write_lock_bh(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif
-#ifdef CONFIG_INLINE_READ_LOCK_IRQ
-#define _read_lock_irq(lock) __read_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
-#define _write_lock_irq(lock) __write_lock_irq(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif
-#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
-#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _spin_trylock(lock) __spin_trylock(lock)
#endif
-#ifdef CONFIG_INLINE_READ_TRYLOCK
-#define _read_trylock(lock) __read_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_TRYLOCK
-#define _write_trylock(lock) __write_trylock(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif
@@ -128,50 +67,18 @@ void __lockfunc _write_unlock_irqrestore
#define _spin_unlock(lock) __spin_unlock(lock)
#endif
-#ifdef CONFIG_INLINE_READ_UNLOCK
-#define _read_unlock(lock) __read_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK
-#define _write_unlock(lock) __write_unlock(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif
-#ifdef CONFIG_INLINE_READ_UNLOCK_BH
-#define _read_unlock_bh(lock) __read_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
-#define _write_unlock_bh(lock) __write_unlock_bh(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
-#define _read_unlock_irq(lock) __read_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-#define _write_unlock_irq(lock) __write_unlock_irq(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
-#endif
-
static inline int __spin_trylock(spinlock_t *lock)
{
preempt_disable();
@@ -183,28 +90,6 @@ static inline int __spin_trylock(spinloc
return 0;
}
-static inline int __read_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_read_trylock(lock)) {
- rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
-static inline int __write_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_write_trylock(lock)) {
- rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,13 +97,6 @@ static inline int __write_trylock(rwlock
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-static inline void __read_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
{
unsigned long flags;
@@ -255,62 +133,6 @@ static inline void __spin_lock_bh(spinlo
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
- _raw_read_lock_flags, &flags);
- return flags;
-}
-
-static inline void __read_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline void __read_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
- _raw_write_lock_flags, &flags);
- return flags;
-}
-
-static inline void __write_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
-static inline void __write_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
static inline void __spin_lock(spinlock_t *lock)
{
preempt_disable();
@@ -318,13 +140,6 @@ static inline void __spin_lock(spinlock_
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-static inline void __write_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
#endif /* CONFIG_PREEMPT */
static inline void __spin_unlock(spinlock_t *lock)
@@ -334,20 +149,6 @@ static inline void __spin_unlock(spinloc
preempt_enable();
}
-static inline void __write_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable();
-}
-
-static inline void __read_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- preempt_enable();
-}
-
static inline void __spin_unlock_irqrestore(spinlock_t *lock,
unsigned long flags)
{
@@ -373,55 +174,6 @@ static inline void __spin_unlock_bh(spin
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
-static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __read_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __read_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline void __write_unlock_irqrestore(rwlock_t *lock,
- unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __write_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __write_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
static inline int __spin_trylock_bh(spinlock_t *lock)
{
local_bh_disable();
@@ -435,4 +187,6 @@ static inline int __spin_trylock_bh(spin
return 0;
}
+#include <linux/rwlock_api_smp.h>
+
#endif /* __LINUX_SPINLOCK_API_SMP_H */
+#endif
+
+static inline int __read_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (_raw_read_trylock(lock)) {
+ rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static inline int __write_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (_raw_write_trylock(lock)) {
+ rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __read_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
+ _raw_read_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline void __read_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
+ _raw_write_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+static inline void __write_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+static inline void __write_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+#endif /* CONFIG_PREEMPT */
+
+static inline void __write_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __read_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __read_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __read_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __write_unlock_irqrestore(rwlock_t *lock,
+ unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __write_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __write_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+#endif /* __LINUX_RWLOCK_API_SMP_H */
Index: linux-2.6-tip/include/linux/spinlock_api_smp.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_api_smp.h
+++ linux-2.6-tip/include/linux/spinlock_api_smp.h
@@ -24,102 +24,41 @@ void __lockfunc _spin_lock_nested(spinlo
__acquires(lock);
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
__acquires(lock);
-void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
+
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
__acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
__acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
int __lockfunc _spin_trylock_bh(spinlock_t *lock);
void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
__releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _spin_lock(lock) __spin_lock(lock)
#endif
-#ifdef CONFIG_INLINE_READ_LOCK
-#define _read_lock(lock) __read_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK
-#define _write_lock(lock) __write_lock(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif
-#ifdef CONFIG_INLINE_READ_LOCK_BH
-#define _read_lock_bh(lock) __read_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_BH
-#define _write_lock_bh(lock) __write_lock_bh(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif
-#ifdef CONFIG_INLINE_READ_LOCK_IRQ
-#define _read_lock_irq(lock) __read_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
-#define _write_lock_irq(lock) __write_lock_irq(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif
-#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
-#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _spin_trylock(lock) __spin_trylock(lock)
#endif
-#ifdef CONFIG_INLINE_READ_TRYLOCK
-#define _read_trylock(lock) __read_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_TRYLOCK
-#define _write_trylock(lock) __write_trylock(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif
@@ -128,50 +67,18 @@ void __lockfunc _write_unlock_irqrestore
#define _spin_unlock(lock) __spin_unlock(lock)
#endif
-#ifdef CONFIG_INLINE_READ_UNLOCK
-#define _read_unlock(lock) __read_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK
-#define _write_unlock(lock) __write_unlock(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif
-#ifdef CONFIG_INLINE_READ_UNLOCK_BH
-#define _read_unlock_bh(lock) __read_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
-#define _write_unlock_bh(lock) __write_unlock_bh(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
-#define _read_unlock_irq(lock) __read_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-#define _write_unlock_irq(lock) __write_unlock_irq(lock)
-#endif
-
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
-#endif
-
static inline int __spin_trylock(spinlock_t *lock)
{
preempt_disable();
@@ -183,28 +90,6 @@ static inline int __spin_trylock(spinloc
return 0;
}
-static inline int __read_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_read_trylock(lock)) {
- rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
-static inline int __write_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_write_trylock(lock)) {
- rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,13 +97,6 @@ static inline int __write_trylock(rwlock
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-static inline void __read_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
{
unsigned long flags;
@@ -255,62 +133,6 @@ static inline void __spin_lock_bh(spinlo
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
- _raw_read_lock_flags, &flags);
- return flags;
-}
-
-static inline void __read_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline void __read_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
- _raw_write_lock_flags, &flags);
- return flags;
-}
-
-static inline void __write_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
-static inline void __write_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
static inline void __spin_lock(spinlock_t *lock)
{
preempt_disable();
@@ -318,13 +140,6 @@ static inline void __spin_lock(spinlock_
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-static inline void __write_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
#endif /* CONFIG_PREEMPT */
static inline void __spin_unlock(spinlock_t *lock)
@@ -334,20 +149,6 @@ static inline void __spin_unlock(spinloc
preempt_enable();
}
-static inline void __write_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable();
-}
-
-static inline void __read_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- preempt_enable();
-}
-
static inline void __spin_unlock_irqrestore(spinlock_t *lock,
unsigned long flags)
{
@@ -373,55 +174,6 @@ static inline void __spin_unlock_bh(spin
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
-static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __read_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __read_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline void __write_unlock_irqrestore(rwlock_t *lock,
- unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __write_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __write_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
static inline int __spin_trylock_bh(spinlock_t *lock)
{
local_bh_disable();
@@ -435,4 +187,6 @@ static inline int __spin_trylock_bh(spin
return 0;
}
+#include <linux/rwlock_api_smp.h>
+
#endif /* __LINUX_SPINLOCK_API_SMP_H */
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 04/23] locking: Convert raw_spinlock to arch_spinlock
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (2 preceding siblings ...)
2009-12-06 18:02 ` [patch 03/23] locking: Separate rwlock api from spinlock api Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:10 ` Linus Torvalds
2009-12-06 18:02 ` [patch 05/23] locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED Thomas Gleixner
` (18 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-convert-raw-to-arch-spinlock.patch --]
[-- Type: text/plain, Size: 53316 bytes --]
The raw_spin* name space was taken by lockdep for the
architecture-specific implementations. raw_spin_* would be the ideal
name space for the spinlocks which are not converted to sleeping locks
in preempt-rt.
Linus suggested converting the raw_ locks to arch_ locks and cleaning
up the name space instead of using an artificial name like core_spin,
atomic_spin or the like.
No functional change.
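Schematically (this snippet only restates the alpha hunks below and is
not additional patch content), the conversion is a pure rename of the
arch-level type; the layout and the code stay the same:

/* before: arch-level type still named raw_spinlock_t */
typedef struct {
	volatile unsigned int lock;
} raw_spinlock_t;

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	mb();
	lock->lock = 0;
}

/* after: only the type name changes */
typedef struct {
	volatile unsigned int lock;
} arch_spinlock_t;

static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
	mb();
	lock->lock = 0;
}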
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/alpha/include/asm/spinlock.h | 6 ++---
arch/alpha/include/asm/spinlock_types.h | 2 -
arch/arm/include/asm/spinlock.h | 6 ++---
arch/arm/include/asm/spinlock_types.h | 2 -
arch/blackfin/include/asm/spinlock.h | 10 ++++-----
arch/blackfin/include/asm/spinlock_types.h | 2 -
arch/cris/include/arch-v32/arch/spinlock.h | 12 +++++------
arch/ia64/include/asm/spinlock.h | 26 ++++++++++++-------------
arch/ia64/include/asm/spinlock_types.h | 2 -
arch/m32r/include/asm/spinlock.h | 6 ++---
arch/m32r/include/asm/spinlock_types.h | 2 -
arch/mips/include/asm/spinlock.h | 10 ++++-----
arch/mips/include/asm/spinlock_types.h | 2 -
arch/parisc/include/asm/atomic.h | 6 ++---
arch/parisc/include/asm/spinlock.h | 8 +++----
arch/parisc/include/asm/spinlock_types.h | 4 +--
arch/parisc/lib/bitops.c | 2 -
arch/powerpc/include/asm/rtas.h | 2 -
arch/powerpc/include/asm/spinlock.h | 14 ++++++-------
arch/powerpc/include/asm/spinlock_types.h | 2 -
arch/powerpc/kernel/rtas.c | 2 -
arch/powerpc/lib/locks.c | 4 +--
arch/powerpc/platforms/pasemi/setup.c | 2 -
arch/s390/include/asm/spinlock.h | 16 +++++++--------
arch/s390/include/asm/spinlock_types.h | 2 -
arch/s390/lib/spinlock.c | 8 +++----
arch/sh/include/asm/spinlock.h | 6 ++---
arch/sh/include/asm/spinlock_types.h | 2 -
arch/sparc/include/asm/spinlock_32.h | 6 ++---
arch/sparc/include/asm/spinlock_64.h | 8 +++----
arch/sparc/include/asm/spinlock_types.h | 2 -
arch/x86/include/asm/paravirt.h | 12 +++++------
arch/x86/include/asm/paravirt_types.h | 14 ++++++-------
arch/x86/include/asm/spinlock.h | 30 ++++++++++++++---------------
arch/x86/include/asm/spinlock_types.h | 4 +--
arch/x86/kernel/dumpstack.c | 2 -
arch/x86/kernel/paravirt-spinlocks.c | 2 -
arch/x86/kernel/tsc_sync.c | 2 -
arch/x86/xen/spinlock.c | 16 +++++++--------
include/asm-generic/bitops/atomic.h | 6 ++---
include/linux/spinlock.h | 4 +--
include/linux/spinlock_types.h | 2 -
include/linux/spinlock_types_up.h | 4 +--
include/linux/spinlock_up.h | 8 +++----
kernel/lockdep.c | 2 -
kernel/trace/ring_buffer.c | 4 +--
kernel/trace/trace.c | 18 ++++++++---------
kernel/trace/trace_clock.c | 4 +--
kernel/trace/trace_sched_wakeup.c | 4 +--
kernel/trace/trace_stack.c | 4 +--
lib/spinlock_debug.c | 2 -
51 files changed, 164 insertions(+), 164 deletions(-)
Index: linux-2.6-tip/arch/alpha/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/alpha/include/asm/spinlock.h
+++ linux-2.6-tip/arch/alpha/include/asm/spinlock.h
@@ -17,13 +17,13 @@
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while ((x)->lock)
-static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+static inline void __raw_spin_unlock(arch_spinlock_t * lock)
{
mb();
lock->lock = 0;
}
-static inline void __raw_spin_lock(raw_spinlock_t * lock)
+static inline void __raw_spin_lock(arch_spinlock_t * lock)
{
long tmp;
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_s
: "m"(lock->lock) : "memory");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return !test_and_set_bit(0, &lock->lock);
}
Index: linux-2.6-tip/arch/alpha/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/alpha/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/alpha/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
Index: linux-2.6-tip/arch/arm/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/arm/include/asm/spinlock.h
+++ linux-2.6-tip/arch/arm/include/asm/spinlock.h
@@ -23,7 +23,7 @@
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_s
smp_mb();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw
}
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
Index: linux-2.6-tip/arch/arm/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/arm/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/arm/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
Index: linux-2.6-tip/arch/blackfin/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/include/asm/spinlock.h
+++ linux-2.6-tip/arch/blackfin/include/asm/spinlock.h
@@ -24,29 +24,29 @@ asmlinkage void __raw_write_lock_asm(vol
asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
{
return __raw_spin_is_locked_asm(&lock->lock);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
__raw_spin_lock_asm(&lock->lock);
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return __raw_spin_trylock_asm(&lock->lock);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__raw_spin_unlock_asm(&lock->lock);
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
while (__raw_spin_is_locked(lock))
cpu_relax();
Index: linux-2.6-tip/arch/blackfin/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/blackfin/include/asm/spinlock_types.h
@@ -15,7 +15,7 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
Index: linux-2.6-tip/arch/cris/include/arch-v32/arch/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/cris/include/arch-v32/arch/spinlock.h
+++ linux-2.6-tip/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, in
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(arch_spinlock_t *x)
{
return *(volatile signed char *)(&(x)->slock) <= 0;
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__asm__ volatile ("move.d %1,%0" \
: "=m" (lock->slock) \
@@ -22,24 +22,24 @@ static inline void __raw_spin_unlock(raw
: "memory");
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
while (__raw_spin_is_locked(lock))
cpu_relax();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return cris_spin_trylock((void *)&lock->slock);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
cris_spin_lock((void *)&lock->slock);
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
__raw_spin_lock(lock);
}
Index: linux-2.6-tip/arch/ia64/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/ia64/include/asm/spinlock.h
+++ linux-2.6-tip/arch/ia64/include/asm/spinlock.h
@@ -38,7 +38,7 @@
#define TICKET_BITS 15
#define TICKET_MASK ((1 << TICKET_BITS) - 1)
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
int *p = (int *)&lock->lock, ticket, serve;
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spi
}
}
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->lock);
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin
return 0;
}
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spi
ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}
-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
{
int *p = (int *)&lock->lock, ticket;
@@ -89,53 +89,53 @@ static __always_inline void __ticket_spi
}
}
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
__raw_spin_lock(lock);
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
__ticket_spin_unlock_wait(lock);
}
Index: linux-2.6-tip/arch/ia64/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/ia64/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/ia64/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
Index: linux-2.6-tip/arch/m32r/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/m32r/include/asm/spinlock.h
+++ linux-2.6-tip/arch/m32r/include/asm/spinlock.h
@@ -36,7 +36,7 @@
* __raw_spin_trylock() tries to get the lock and returns a result.
* On the m32r, the result value is 1 (= Success) or 0 (= Failure).
*/
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
int oldval;
unsigned long tmp1, tmp2;
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw
return (oldval > 0);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp0, tmp1;
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_s
);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
mb();
lock->slock = 1;
Index: linux-2.6-tip/arch/m32r/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/m32r/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/m32r/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
typedef struct {
volatile int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
Index: linux-2.6-tip/arch/mips/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/mips/include/asm/spinlock.h
+++ linux-2.6-tip/arch/mips/include/asm/spinlock.h
@@ -34,7 +34,7 @@
* becomes equal to the the initial value of the tail.
*/
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
@@ -45,7 +45,7 @@ static inline int __raw_spin_is_locked(r
#define __raw_spin_unlock_wait(x) \
while (__raw_spin_is_locked(x)) { cpu_relax(); }
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
@@ -53,7 +53,7 @@ static inline int __raw_spin_is_contende
}
#define __raw_spin_is_contended __raw_spin_is_contended
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
int my_ticket;
int tmp;
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_s
smp_llsc_mb();
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
int tmp;
@@ -174,7 +174,7 @@ static inline void __raw_spin_unlock(raw
}
}
-static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
{
int tmp, tmp2, tmp3;
Index: linux-2.6-tip/arch/mips/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/mips/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/mips/include/asm/spinlock_types.h
@@ -12,7 +12,7 @@ typedef struct {
* bits 15..28: ticket
*/
unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
Index: linux-2.6-tip/arch/parisc/include/asm/atomic.h
===================================================================
--- linux-2.6-tip.orig/arch/parisc/include/asm/atomic.h
+++ linux-2.6-tip/arch/parisc/include/asm/atomic.h
@@ -27,18 +27,18 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
__raw_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
__raw_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
Index: linux-2.6-tip/arch/parisc/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/parisc/include/asm/spinlock.h
+++ linux-2.6-tip/arch/parisc/include/asm/spinlock.h
@@ -5,7 +5,7 @@
#include <asm/processor.h>
#include <asm/spinlock_types.h>
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(arch_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
return *a == 0;
@@ -15,7 +15,7 @@ static inline int __raw_spin_is_locked(r
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while (__raw_spin_is_locked(x))
-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags
mb();
}
-static inline void __raw_spin_unlock(raw_spinlock_t *x)
+static inline void __raw_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw
mb();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *x)
+static inline int __raw_spin_trylock(arch_spinlock_t *x)
{
volatile unsigned int *a;
int ret;
Index: linux-2.6-tip/arch/parisc/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/parisc/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/parisc/include/asm/spinlock_types.h
@@ -9,10 +9,10 @@ typedef struct {
volatile unsigned int lock[4];
# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif
-} raw_spinlock_t;
+} arch_spinlock_t;
typedef struct {
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
volatile int counter;
} raw_rwlock_t;
Index: linux-2.6-tip/arch/parisc/lib/bitops.c
===================================================================
--- linux-2.6-tip.orig/arch/parisc/lib/bitops.c
+++ linux-2.6-tip/arch/parisc/lib/bitops.c
@@ -12,7 +12,7 @@
#include <asm/atomic.h>
#ifdef CONFIG_SMP
-raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
};
#endif
Index: linux-2.6-tip/arch/powerpc/include/asm/rtas.h
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/include/asm/rtas.h
+++ linux-2.6-tip/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
unsigned long entry; /* physical address pointer */
unsigned long base; /* physical address pointer */
unsigned long size;
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
struct rtas_args args;
struct device_node *dev; /* virtual address pointer */
};
Index: linux-2.6-tip/arch/powerpc/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/include/asm/spinlock.h
+++ linux-2.6-tip/arch/powerpc/include/asm/spinlock.h
@@ -54,7 +54,7 @@
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
*/
-static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, token;
@@ -73,7 +73,7 @@ static inline unsigned long arch_spin_tr
return tmp;
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
return arch_spin_trylock(lock) == 0;
@@ -96,7 +96,7 @@ static inline int __raw_spin_trylock(raw
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
-extern void __spin_yield(raw_spinlock_t *lock);
+extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x) barrier()
@@ -104,7 +104,7 @@ extern void __rw_yield(raw_rwlock_t *loc
#define SHARED_PROCESSOR 0
#endif
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
while (1) {
@@ -120,7 +120,7 @@ static inline void __raw_spin_lock(raw_s
}
static inline
-void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long flags_dis;
@@ -140,7 +140,7 @@ void __raw_spin_lock_flags(raw_spinlock_
}
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
SYNC_IO;
__asm__ __volatile__("# __raw_spin_unlock\n\t"
@@ -149,7 +149,7 @@ static inline void __raw_spin_unlock(raw
}
#ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+extern void __raw_spin_unlock_wait(arch_spinlock_t *lock);
#else
#define __raw_spin_unlock_wait(lock) \
do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
Index: linux-2.6-tip/arch/powerpc/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/powerpc/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
Index: linux-2.6-tip/arch/powerpc/kernel/rtas.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/kernel/rtas.c
+++ linux-2.6-tip/arch/powerpc/kernel/rtas.c
@@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsig
return 1;
}
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
static u64 timebase = 0;
void __cpuinit rtas_give_timebase(void)
Index: linux-2.6-tip/arch/powerpc/lib/locks.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/lib/locks.c
+++ linux-2.6-tip/arch/powerpc/lib/locks.c
@@ -25,7 +25,7 @@
#include <asm/smp.h>
#include <asm/firmware.h>
-void __spin_yield(raw_spinlock_t *lock)
+void __spin_yield(arch_spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
}
#endif
-void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
while (lock->slock) {
HMT_low();
Index: linux-2.6-tip/arch/powerpc/platforms/pasemi/setup.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/platforms/pasemi/setup.c
+++ linux-2.6-tip/arch/powerpc/platforms/pasemi/setup.c
@@ -71,7 +71,7 @@ static void pas_restart(char *cmd)
}
#ifdef CONFIG_SMP
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
static unsigned long timebase;
static void __devinit pas_give_timebase(void)
Index: linux-2.6-tip/arch/s390/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/s390/include/asm/spinlock.h
+++ linux-2.6-tip/arch/s390/include/asm/spinlock.h
@@ -57,12 +57,12 @@ _raw_compare_and_swap(volatile unsigned
do { while (__raw_spin_is_locked(lock)) \
_raw_spin_relax(lock); } while (0)
-extern void _raw_spin_lock_wait(raw_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *);
-extern void _raw_spin_relax(raw_spinlock_t *lock);
+extern void _raw_spin_lock_wait(arch_spinlock_t *);
+extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int _raw_spin_trylock_retry(arch_spinlock_t *);
+extern void _raw_spin_relax(arch_spinlock_t *lock);
-static inline void __raw_spin_lock(raw_spinlock_t *lp)
+static inline void __raw_spin_lock(arch_spinlock_t *lp)
{
int old;
@@ -72,7 +72,7 @@ static inline void __raw_spin_lock(raw_s
_raw_spin_lock_wait(lp);
}
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
+static inline void __raw_spin_lock_flags(arch_spinlock_t *lp,
unsigned long flags)
{
int old;
@@ -83,7 +83,7 @@ static inline void __raw_spin_lock_flags
_raw_spin_lock_wait_flags(lp, flags);
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lp)
+static inline int __raw_spin_trylock(arch_spinlock_t *lp)
{
int old;
@@ -93,7 +93,7 @@ static inline int __raw_spin_trylock(raw
return _raw_spin_trylock_retry(lp);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lp)
+static inline void __raw_spin_unlock(arch_spinlock_t *lp)
{
_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
Index: linux-2.6-tip/arch/s390/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/s390/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/s390/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned int owner_cpu;
-} __attribute__ ((aligned (4))) raw_spinlock_t;
+} __attribute__ ((aligned (4))) arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
Index: linux-2.6-tip/arch/s390/lib/spinlock.c
===================================================================
--- linux-2.6-tip.orig/arch/s390/lib/spinlock.c
+++ linux-2.6-tip/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cp
_raw_yield();
}
-void _raw_spin_lock_wait(raw_spinlock_t *lp)
+void _raw_spin_lock_wait(arch_spinlock_t *lp)
{
int count = spin_retry;
unsigned int cpu = ~smp_processor_id();
@@ -59,7 +59,7 @@ void _raw_spin_lock_wait(raw_spinlock_t
}
EXPORT_SYMBOL(_raw_spin_lock_wait);
-void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
+void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
int count = spin_retry;
unsigned int cpu = ~smp_processor_id();
@@ -82,7 +82,7 @@ void _raw_spin_lock_wait_flags(raw_spinl
}
EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
-int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+int _raw_spin_trylock_retry(arch_spinlock_t *lp)
{
unsigned int cpu = ~smp_processor_id();
int count;
@@ -97,7 +97,7 @@ int _raw_spin_trylock_retry(raw_spinlock
}
EXPORT_SYMBOL(_raw_spin_trylock_retry);
-void _raw_spin_relax(raw_spinlock_t *lock)
+void _raw_spin_relax(arch_spinlock_t *lock)
{
unsigned int cpu = lock->owner_cpu;
if (cpu != 0)
Index: linux-2.6-tip/arch/sh/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/sh/include/asm/spinlock.h
+++ linux-2.6-tip/arch/sh/include/asm/spinlock.h
@@ -34,7 +34,7 @@
*
* We make no fairness assumptions. They have a cost.
*/
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
unsigned long oldval;
@@ -54,7 +54,7 @@ static inline void __raw_spin_lock(raw_s
);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -67,7 +67,7 @@ static inline void __raw_spin_unlock(raw
);
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, oldval;
Index: linux-2.6-tip/arch/sh/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/sh/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/sh/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_32.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_32.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_32.h
@@ -15,7 +15,7 @@
#define __raw_spin_unlock_wait(lock) \
do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_s
: "g2", "memory", "cc");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
unsigned int result;
__asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw
return (result == 0);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_64.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_64.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_64.h
@@ -27,7 +27,7 @@
do { rmb(); \
} while((lp)->lock)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_s
: "memory");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
unsigned long result;
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw
return (result == 0UL);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
" stb %%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw
: "memory");
}
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long tmp1, tmp2;
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
typedef struct {
volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
Index: linux-2.6-tip/arch/x86/include/asm/paravirt.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/paravirt.h
+++ linux-2.6-tip/arch/x86/include/asm/paravirt.h
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+static inline int __raw_spin_is_locked(struct arch_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}
-static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+static inline int __raw_spin_is_contended(struct arch_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended
-static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+static __always_inline void __raw_spin_lock(struct arch_spinlock *lock)
{
PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}
-static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock,
unsigned long flags)
{
PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}
-static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock)
{
PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}
Index: linux-2.6-tip/arch/x86/include/asm/paravirt_types.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/paravirt_types.h
+++ linux-2.6-tip/arch/x86/include/asm/paravirt_types.h
@@ -318,14 +318,14 @@ struct pv_mmu_ops {
phys_addr_t phys, pgprot_t flags);
};
-struct raw_spinlock;
+struct arch_spinlock;
struct pv_lock_ops {
- int (*spin_is_locked)(struct raw_spinlock *lock);
- int (*spin_is_contended)(struct raw_spinlock *lock);
- void (*spin_lock)(struct raw_spinlock *lock);
- void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
- int (*spin_trylock)(struct raw_spinlock *lock);
- void (*spin_unlock)(struct raw_spinlock *lock);
+ int (*spin_is_locked)(struct arch_spinlock *lock);
+ int (*spin_is_contended)(struct arch_spinlock *lock);
+ void (*spin_lock)(struct arch_spinlock *lock);
+ void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+ int (*spin_trylock)(struct arch_spinlock *lock);
+ void (*spin_unlock)(struct arch_spinlock *lock);
};
/* This contains all the paravirt structures: we get a convenient
Index: linux-2.6-tip/arch/x86/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/spinlock.h
+++ linux-2.6-tip/arch/x86/include/asm/spinlock.h
@@ -58,7 +58,7 @@
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
short inc = 0x0100;
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spi
: "memory", "cc");
}
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
int tmp, new;
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin
return tmp;
}
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
: "+m" (lock->slock)
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spi
#else
#define TICKET_SHIFT 16
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
int inc = 0x00010000;
int tmp;
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spi
: "memory", "cc");
}
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
int tmp;
int new;
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin
return tmp;
}
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
: "+m" (lock->slock)
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spi
}
#endif
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
@@ -174,33 +174,33 @@ static inline int __ticket_spin_is_conte
#ifndef CONFIG_PARAVIRT_SPINLOCKS
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
__raw_spin_lock(lock);
@@ -208,7 +208,7 @@ static __always_inline void __raw_spin_l
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
while (__raw_spin_is_locked(lock))
cpu_relax();
Index: linux-2.6-tip/arch/x86/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/x86/include/asm/spinlock_types.h
@@ -5,9 +5,9 @@
# error "please don't include this file directly"
#endif
-typedef struct raw_spinlock {
+typedef struct arch_spinlock {
unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
Index: linux-2.6-tip/arch/x86/kernel/dumpstack.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/kernel/dumpstack.c
+++ linux-2.6-tip/arch/x86/kernel/dumpstack.c
@@ -188,7 +188,7 @@ void dump_stack(void)
}
EXPORT_SYMBOL(dump_stack);
-static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
Index: linux-2.6-tip/arch/x86/kernel/paravirt-spinlocks.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/kernel/paravirt-spinlocks.c
+++ linux-2.6-tip/arch/x86/kernel/paravirt-spinlocks.c
@@ -8,7 +8,7 @@
#include <asm/paravirt.h>
static inline void
-default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
__raw_spin_lock(lock);
}
Index: linux-2.6-tip/arch/x86/kernel/tsc_sync.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/kernel/tsc_sync.c
+++ linux-2.6-tip/arch/x86/kernel/tsc_sync.c
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count
* we want to have the fastest, inlined, non-debug version
* of a critical section, to be able to prove TSC time-warps:
*/
-static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
static __cpuinitdata cycles_t last_tsc;
static __cpuinitdata cycles_t max_warp;
Index: linux-2.6-tip/arch/x86/xen/spinlock.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/xen/spinlock.c
+++ linux-2.6-tip/arch/x86/xen/spinlock.c
@@ -120,14 +120,14 @@ struct xen_spinlock {
unsigned short spinners; /* count of waiting cpus */
};
-static int xen_spin_is_locked(struct raw_spinlock *lock)
+static int xen_spin_is_locked(struct arch_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
return xl->lock != 0;
}
-static int xen_spin_is_contended(struct raw_spinlock *lock)
+static int xen_spin_is_contended(struct arch_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
@@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct
return xl->spinners != 0;
}
-static int xen_spin_trylock(struct raw_spinlock *lock)
+static int xen_spin_trylock(struct arch_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
u8 old = 1;
@@ -181,7 +181,7 @@ static inline void unspinning_lock(struc
__get_cpu_var(lock_spinners) = prev;
}
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
+static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
struct xen_spinlock *prev;
@@ -254,7 +254,7 @@ out:
return ret;
}
-static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
+static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
unsigned timeout;
@@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struc
spin_time_accum_total(start_spin);
}
-static void xen_spin_lock(struct raw_spinlock *lock)
+static void xen_spin_lock(struct arch_spinlock *lock)
{
__xen_spin_lock(lock, false);
}
-static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
{
__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
}
@@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slo
}
}
-static void xen_spin_unlock(struct raw_spinlock *lock)
+static void xen_spin_unlock(struct arch_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
Index: linux-2.6-tip/include/asm-generic/bitops/atomic.h
===================================================================
--- linux-2.6-tip.orig/include/asm-generic/bitops/atomic.h
+++ linux-2.6-tip/include/asm-generic/bitops/atomic.h
@@ -15,18 +15,18 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
__raw_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
__raw_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
Index: linux-2.6-tip/include/linux/spinlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock.h
+++ linux-2.6-tip/include/linux/spinlock.h
@@ -8,7 +8,7 @@
*
* on SMP builds:
*
- * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ * asm/spinlock_types.h: contains the arch_spinlock_t/raw_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
@@ -75,7 +75,7 @@
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and raw_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
Index: linux-2.6-tip/include/linux/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_types.h
+++ linux-2.6-tip/include/linux/spinlock_types.h
@@ -18,7 +18,7 @@
#include <linux/lockdep.h>
typedef struct {
- raw_spinlock_t raw_lock;
+ arch_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
Index: linux-2.6-tip/include/linux/spinlock_types_up.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_types_up.h
+++ linux-2.6-tip/include/linux/spinlock_types_up.h
@@ -16,13 +16,13 @@
typedef struct {
volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
#else
-typedef struct { } raw_spinlock_t;
+typedef struct { } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { }
Index: linux-2.6-tip/include/linux/spinlock_up.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_up.h
+++ linux-2.6-tip/include/linux/spinlock_up.h
@@ -20,19 +20,19 @@
#ifdef CONFIG_DEBUG_SPINLOCK
#define __raw_spin_is_locked(x) ((x)->slock == 0)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
local_irq_save(flags);
lock->slock = 0;
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
char oldval = lock->slock;
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw
return oldval > 0;
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
lock->slock = 1;
}
Index: linux-2.6-tip/kernel/lockdep.c
===================================================================
--- linux-2.6-tip.orig/kernel/lockdep.c
+++ linux-2.6-tip/kernel/lockdep.c
@@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644);
* to use a raw spinlock - we really dont want the spinlock
* code to recurse back into the lockdep code...
*/
-static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
static int graph_lock(void)
{
Index: linux-2.6-tip/kernel/trace/ring_buffer.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/ring_buffer.c
+++ linux-2.6-tip/kernel/trace/ring_buffer.c
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
int cpu;
struct ring_buffer *buffer;
spinlock_t reader_lock; /* serialize readers */
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
struct lock_class_key lock_key;
struct list_head *pages;
struct buffer_page *head_page; /* read from head */
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffe
cpu_buffer->buffer = buffer;
spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
- cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
Index: linux-2.6-tip/kernel/trace/trace.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace.c
+++ linux-2.6-tip/kernel/trace/trace.c
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struc
* protected by per_cpu spinlocks. But the action of the swap
* needs its own lock.
*
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
* with performance when lockdep debugging is enabled.
*
* It is also used in other places outside the update_max_tr
* so it needs to be defined outside of the
* CONFIG_TRACER_MAX_TRACE.
*/
-static raw_spinlock_t ftrace_max_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+ (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency;
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_M
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsig
*/
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
- static raw_spinlock_t trace_buf_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ static arch_spinlock_t trace_buf_lock =
+ (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
static u32 trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_bprint;
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_arra
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
- static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+ static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_print;
@@ -4268,8 +4268,8 @@ trace_printk_seq(struct trace_seq *s)
static void __ftrace_dump(bool disable_tracing)
{
- static raw_spinlock_t ftrace_dump_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ static arch_spinlock_t ftrace_dump_lock =
+ (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
unsigned int old_userobj;
Index: linux-2.6-tip/kernel/trace/trace_clock.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_clock.c
+++ linux-2.6-tip/kernel/trace/trace_clock.c
@@ -71,10 +71,10 @@ u64 notrace trace_clock(void)
/* keep prev_time and lock in the same cacheline. */
static struct {
u64 prev_time;
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
{
- .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+ .lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
};
u64 notrace trace_clock_global(void)
Index: linux-2.6-tip/kernel/trace/trace_sched_wakeup.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_sched_wakeup.c
+++ linux-2.6-tip/kernel/trace/trace_sched_wakeup.c
@@ -28,8 +28,8 @@ static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static int wakeup_rt;
-static raw_spinlock_t wakeup_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t wakeup_lock =
+ (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
static void __wakeup_reset(struct trace_array *tr);
Index: linux-2.6-tip/kernel/trace/trace_stack.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_stack.c
+++ linux-2.6-tip/kernel/trace/trace_stack.c
@@ -27,8 +27,8 @@ static struct stack_trace max_stack_trac
};
static unsigned long max_stack_size;
-static raw_spinlock_t max_stack_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t max_stack_lock =
+ (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
Index: linux-2.6-tip/lib/spinlock_debug.c
===================================================================
--- linux-2.6-tip.orig/lib/spinlock_debug.c
+++ linux-2.6-tip/lib/spinlock_debug.c
@@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock,
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
- lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 05/23] locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (3 preceding siblings ...)
2009-12-06 18:02 ` [patch 04/23] locking: Convert raw_spinlock to arch_spinlock Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` [patch 06/23] locking: Convert __raw_spin* functions to arch_spin* Thomas Gleixner
` (17 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-rename-raw-lock-initializer.patch --]
[-- Type: text/plain, Size: 16140 bytes --]
Further name space cleanup. No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
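[ Note (illustration only, not part of the patch): a minimal sketch of how
  a static arch-level lock declaration reads once this rename is applied.
  The lock name "demo_lock" is hypothetical; the initializer name is the
  one introduced below, and the __raw_spin_* accessors are still unchanged
  at this point in the series. ]

  #include <linux/spinlock.h>

  /* before this patch: ... = __RAW_SPIN_LOCK_UNLOCKED; */
  static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

  static void demo(void)
  {
          /* the lock/unlock API itself is untouched by this patch */
          __raw_spin_lock(&demo_lock);
          __raw_spin_unlock(&demo_lock);
  }
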
arch/alpha/include/asm/spinlock_types.h | 2 +-
arch/arm/include/asm/spinlock_types.h | 2 +-
arch/blackfin/include/asm/spinlock_types.h | 2 +-
arch/ia64/include/asm/spinlock_types.h | 2 +-
arch/m32r/include/asm/spinlock_types.h | 2 +-
arch/mips/include/asm/spinlock_types.h | 2 +-
arch/parisc/include/asm/spinlock_types.h | 6 +++---
arch/parisc/lib/bitops.c | 2 +-
arch/powerpc/include/asm/spinlock_types.h | 2 +-
arch/powerpc/kernel/rtas.c | 2 +-
arch/s390/include/asm/spinlock_types.h | 2 +-
arch/sh/include/asm/spinlock_types.h | 2 +-
arch/sparc/include/asm/spinlock_types.h | 2 +-
arch/x86/include/asm/spinlock_types.h | 2 +-
arch/x86/kernel/dumpstack.c | 2 +-
arch/x86/kernel/tsc_sync.c | 2 +-
include/linux/spinlock_types.h | 4 ++--
include/linux/spinlock_types_up.h | 4 ++--
kernel/lockdep.c | 2 +-
kernel/trace/ring_buffer.c | 2 +-
kernel/trace/trace.c | 10 +++++-----
kernel/trace/trace_clock.c | 2 +-
kernel/trace/trace_sched_wakeup.c | 2 +-
kernel/trace/trace_stack.c | 2 +-
lib/spinlock_debug.c | 2 +-
25 files changed, 33 insertions(+), 33 deletions(-)
Index: linux-2.6-tip/arch/alpha/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/alpha/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/alpha/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned int lock;
} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
Index: linux-2.6-tip/arch/arm/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/arm/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/arm/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned int lock;
} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
Index: linux-2.6-tip/arch/blackfin/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/blackfin/include/asm/spinlock_types.h
@@ -17,7 +17,7 @@ typedef struct {
volatile unsigned int lock;
} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
Index: linux-2.6-tip/arch/ia64/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/ia64/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/ia64/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned int lock;
} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int read_counter : 31;
Index: linux-2.6-tip/arch/m32r/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/m32r/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/m32r/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
volatile int slock;
} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
typedef struct {
volatile int lock;
Index: linux-2.6-tip/arch/mips/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/mips/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/mips/include/asm/spinlock_types.h
@@ -14,7 +14,7 @@ typedef struct {
unsigned int lock;
} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
Index: linux-2.6-tip/arch/parisc/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/parisc/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/parisc/include/asm/spinlock_types.h
@@ -4,10 +4,10 @@
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;
-# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
volatile unsigned int lock[4];
-# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif
} arch_spinlock_t;
@@ -16,6 +16,6 @@ typedef struct {
volatile int counter;
} raw_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 }
+#define __RAW_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
#endif
Index: linux-2.6-tip/arch/parisc/lib/bitops.c
===================================================================
--- linux-2.6-tip.orig/arch/parisc/lib/bitops.c
+++ linux-2.6-tip/arch/parisc/lib/bitops.c
@@ -13,7 +13,7 @@
#ifdef CONFIG_SMP
arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
- [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
+ [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
};
#endif
Index: linux-2.6-tip/arch/powerpc/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/powerpc/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned int slock;
} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile signed int lock;
Index: linux-2.6-tip/arch/powerpc/kernel/rtas.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/kernel/rtas.c
+++ linux-2.6-tip/arch/powerpc/kernel/rtas.c
@@ -42,7 +42,7 @@
#include <asm/mmu.h>
struct rtas_t rtas = {
- .lock = __RAW_SPIN_LOCK_UNLOCKED
+ .lock = __ARCH_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);
Index: linux-2.6-tip/arch/s390/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/s390/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/s390/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned int owner_cpu;
} __attribute__ ((aligned (4))) arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
Index: linux-2.6-tip/arch/sh/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/sh/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/sh/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned int lock;
} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
typedef struct {
volatile unsigned int lock;
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned char lock;
} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
Index: linux-2.6-tip/arch/x86/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/x86/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct arch_spinlock {
unsigned int slock;
} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
unsigned int lock;
Index: linux-2.6-tip/arch/x86/kernel/dumpstack.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/kernel/dumpstack.c
+++ linux-2.6-tip/arch/x86/kernel/dumpstack.c
@@ -188,7 +188,7 @@ void dump_stack(void)
}
EXPORT_SYMBOL(dump_stack);
-static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
Index: linux-2.6-tip/arch/x86/kernel/tsc_sync.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/kernel/tsc_sync.c
+++ linux-2.6-tip/arch/x86/kernel/tsc_sync.c
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count
* we want to have the fastest, inlined, non-debug version
* of a critical section, to be able to prove TSC time-warps:
*/
-static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static __cpuinitdata cycles_t last_tsc;
static __cpuinitdata cycles_t max_warp;
Index: linux-2.6-tip/include/linux/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_types.h
+++ linux-2.6-tip/include/linux/spinlock_types.h
@@ -43,14 +43,14 @@ typedef struct {
#ifdef CONFIG_DEBUG_SPINLOCK
# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+ (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
.magic = SPINLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \
SPIN_DEP_MAP_INIT(lockname) }
#else
# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+ (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
SPIN_DEP_MAP_INIT(lockname) }
#endif
Index: linux-2.6-tip/include/linux/spinlock_types_up.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_types_up.h
+++ linux-2.6-tip/include/linux/spinlock_types_up.h
@@ -18,13 +18,13 @@ typedef struct {
volatile unsigned int slock;
} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
typedef struct { } arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { }
+#define __ARCH_SPIN_LOCK_UNLOCKED { }
#endif
Index: linux-2.6-tip/kernel/lockdep.c
===================================================================
--- linux-2.6-tip.orig/kernel/lockdep.c
+++ linux-2.6-tip/kernel/lockdep.c
@@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644);
* to use a raw spinlock - we really dont want the spinlock
* code to recurse back into the lockdep code...
*/
-static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static int graph_lock(void)
{
Index: linux-2.6-tip/kernel/trace/ring_buffer.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/ring_buffer.c
+++ linux-2.6-tip/kernel/trace/ring_buffer.c
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffe
cpu_buffer->buffer = buffer;
spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
- cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
Index: linux-2.6-tip/kernel/trace/trace.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace.c
+++ linux-2.6-tip/kernel/trace/trace.c
@@ -501,7 +501,7 @@ static ssize_t trace_seq_to_buffer(struc
* CONFIG_TRACER_MAX_TRACE.
*/
static arch_spinlock_t ftrace_max_lock =
- (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency;
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_M
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
-static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -1252,7 +1252,7 @@ ftrace_special(unsigned long arg1, unsig
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
static arch_spinlock_t trace_buf_lock =
- (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static u32 trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_bprint;
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_arra
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
- static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+ static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_print;
@@ -4269,7 +4269,7 @@ trace_printk_seq(struct trace_seq *s)
static void __ftrace_dump(bool disable_tracing)
{
static arch_spinlock_t ftrace_dump_lock =
- (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
unsigned int old_userobj;
Index: linux-2.6-tip/kernel/trace/trace_clock.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_clock.c
+++ linux-2.6-tip/kernel/trace/trace_clock.c
@@ -74,7 +74,7 @@ static struct {
arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
{
- .lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+ .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
};
u64 notrace trace_clock_global(void)
Index: linux-2.6-tip/kernel/trace/trace_sched_wakeup.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_sched_wakeup.c
+++ linux-2.6-tip/kernel/trace/trace_sched_wakeup.c
@@ -29,7 +29,7 @@ static unsigned wakeup_prio = -1;
static int wakeup_rt;
static arch_spinlock_t wakeup_lock =
- (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static void __wakeup_reset(struct trace_array *tr);
Index: linux-2.6-tip/kernel/trace/trace_stack.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_stack.c
+++ linux-2.6-tip/kernel/trace/trace_stack.c
@@ -28,7 +28,7 @@ static struct stack_trace max_stack_trac
static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
- (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
Index: linux-2.6-tip/lib/spinlock_debug.c
===================================================================
--- linux-2.6-tip.orig/lib/spinlock_debug.c
+++ linux-2.6-tip/lib/spinlock_debug.c
@@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock,
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
- lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 06/23] locking: Convert __raw_spin* functions to arch_spin*
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (4 preceding siblings ...)
2009-12-06 18:02 ` [patch 05/23] locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` [patch 07/23] locking: Convert raw_rwlock to arch_rwlock Thomas Gleixner
` (16 subsequent siblings)
22 siblings, 0 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-convert-arch-raw-lock-functions.patch --]
[-- Type: text/plain, Size: 70705 bytes --]
Name space cleanup. No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
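[ Note (illustration only, not part of the patch): a sketch of how a caller
  of the low level lock API reads after this rename. The lock name
  "demo_lock" is hypothetical; the arch_spin_* names are the ones this
  patch introduces in place of the old __raw_spin_* names. ]

  #include <linux/spinlock.h>

  static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

  static void demo(void)
  {
          /* was: __raw_spin_lock(&demo_lock) / __raw_spin_unlock(&demo_lock) */
          arch_spin_lock(&demo_lock);
          arch_spin_unlock(&demo_lock);
  }
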
arch/alpha/include/asm/spinlock.h | 18 +++++------
arch/arm/include/asm/spinlock.h | 20 ++++++------
arch/blackfin/include/asm/spinlock.h | 20 ++++++------
arch/cris/include/arch-v32/arch/spinlock.h | 46 ++++++++++++++---------------
arch/ia64/include/asm/bitops.h | 2 -
arch/ia64/include/asm/spinlock.h | 26 ++++++++--------
arch/m32r/include/asm/spinlock.h | 28 ++++++++---------
arch/mips/include/asm/spinlock.h | 36 +++++++++++-----------
arch/parisc/include/asm/atomic.h | 4 +-
arch/parisc/include/asm/spinlock.h | 44 +++++++++++++--------------
arch/powerpc/include/asm/spinlock.h | 32 ++++++++++----------
arch/powerpc/kernel/rtas.c | 12 +++----
arch/powerpc/lib/locks.c | 4 +-
arch/powerpc/platforms/pasemi/setup.c | 8 ++---
arch/s390/include/asm/spinlock.h | 36 +++++++++++-----------
arch/s390/lib/spinlock.c | 22 ++++++-------
arch/sh/include/asm/spinlock.h | 26 ++++++++--------
arch/sparc/include/asm/spinlock_32.h | 20 ++++++------
arch/sparc/include/asm/spinlock_64.h | 18 +++++------
arch/x86/include/asm/paravirt.h | 14 ++++----
arch/x86/include/asm/spinlock.h | 26 ++++++++--------
arch/x86/kernel/dumpstack.c | 6 +--
arch/x86/kernel/paravirt-spinlocks.c | 2 -
arch/x86/kernel/tsc_sync.c | 8 ++---
include/asm-generic/bitops/atomic.h | 4 +-
include/linux/spinlock.h | 22 ++++++-------
include/linux/spinlock_up.h | 26 ++++++++--------
kernel/lockdep.c | 18 +++++------
kernel/mutex-debug.h | 4 +-
kernel/spinlock.c | 4 +-
kernel/trace/ring_buffer.c | 12 +++----
kernel/trace/trace.c | 32 ++++++++++----------
kernel/trace/trace_clock.c | 4 +-
kernel/trace/trace_sched_wakeup.c | 12 +++----
kernel/trace/trace_selftest.c | 4 +-
kernel/trace/trace_stack.c | 12 +++----
lib/spinlock_debug.c | 8 ++---
37 files changed, 320 insertions(+), 320 deletions(-)
Index: linux-2.6-tip/arch/alpha/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/alpha/include/asm/spinlock.h
+++ linux-2.6-tip/arch/alpha/include/asm/spinlock.h
@@ -12,18 +12,18 @@
* We make no fairness assumptions. They have a cost.
*/
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_unlock_wait(x) \
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_unlock_wait(x) \
do { cpu_relax(); } while ((x)->lock)
-static inline void __raw_spin_unlock(arch_spinlock_t * lock)
+static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
mb();
lock->lock = 0;
}
-static inline void __raw_spin_lock(arch_spinlock_t * lock)
+static inline void arch_spin_lock(arch_spinlock_t * lock)
{
long tmp;
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_
: "m"(lock->lock) : "memory");
}
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return !test_and_set_bit(0, &lock->lock);
}
@@ -169,8 +169,8 @@ static inline void __raw_write_unlock(ra
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ALPHA_SPINLOCK_H */
Index: linux-2.6-tip/arch/arm/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/arm/include/asm/spinlock.h
+++ linux-2.6-tip/arch/arm/include/asm/spinlock.h
@@ -17,13 +17,13 @@
* Locked value: 1
*/
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_
smp_mb();
}
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(arc
}
}
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
@@ -220,8 +220,8 @@ static inline int __raw_read_trylock(raw
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
Index: linux-2.6-tip/arch/blackfin/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/include/asm/spinlock.h
+++ linux-2.6-tip/arch/blackfin/include/asm/spinlock.h
@@ -24,31 +24,31 @@ asmlinkage void __raw_write_lock_asm(vol
asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
-static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __raw_spin_is_locked_asm(&lock->lock);
}
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
__raw_spin_lock_asm(&lock->lock);
}
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __raw_spin_trylock_asm(&lock->lock);
}
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__raw_spin_unlock_asm(&lock->lock);
}
-static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
+ while (arch_spin_is_locked(lock))
cpu_relax();
}
@@ -92,9 +92,9 @@ static inline void __raw_write_unlock(ra
__raw_write_unlock_asm(&rw->lock);
}
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif
Index: linux-2.6-tip/arch/cris/include/arch-v32/arch/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/cris/include/arch-v32/arch/spinlock.h
+++ linux-2.6-tip/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, in
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);
-static inline int __raw_spin_is_locked(arch_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
return *(volatile signed char *)(&(x)->slock) <= 0;
}
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ volatile ("move.d %1,%0" \
: "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(arc
: "memory");
}
-static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
+ while (arch_spin_is_locked(lock))
cpu_relax();
}
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return cris_spin_trylock((void *)&lock->slock);
}
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
cris_spin_lock((void *)&lock->slock);
}
static inline void
-__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
/*
@@ -68,64 +68,64 @@ static inline int __raw_write_can_lock(r
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
while (rw->lock == 0);
rw->lock--;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = 0;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
rw->lock++;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = RW_LOCK_BIAS;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
int ret = 0;
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
if (rw->lock != 0) {
rw->lock--;
ret = 1;
}
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
return ret;
}
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
int ret = 0;
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
if (rw->lock == RW_LOCK_BIAS) {
rw->lock = 0;
ret = 1;
}
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
return 1;
}
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_ARCH_SPINLOCK_H */
Index: linux-2.6-tip/arch/ia64/include/asm/bitops.h
===================================================================
--- linux-2.6-tip.orig/arch/ia64/include/asm/bitops.h
+++ linux-2.6-tip/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void
* @addr: Address to start counting from
*
* Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
*/
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
Index: linux-2.6-tip/arch/ia64/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/ia64/include/asm/spinlock.h
+++ linux-2.6-tip/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
#include <asm/intrinsics.h>
#include <asm/system.h>
-#define __raw_spin_lock_init(x) ((x)->lock = 0)
+#define arch_spin_lock_init(x) ((x)->lock = 0)
/*
* Ticket locks are conceptually two parts, one indicating the current head of
@@ -103,39 +103,39 @@ static inline int __ticket_spin_is_conte
return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
-static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}
-static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}
-static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}
-static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
-static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
-static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
__ticket_spin_unlock_wait(lock);
}
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw
return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_IA64_SPINLOCK_H */
Index: linux-2.6-tip/arch/m32r/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/m32r/include/asm/spinlock.h
+++ linux-2.6-tip/arch/m32r/include/asm/spinlock.h
@@ -24,19 +24,19 @@
* We make no fairness assumptions. They have a cost.
*/
-#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
- do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ do { cpu_relax(); } while (arch_spin_is_locked(x))
/**
- * __raw_spin_trylock - Try spin lock and return a result
+ * arch_spin_trylock - Try spin lock and return a result
* @lock: Pointer to the lock variable
*
- * __raw_spin_trylock() tries to get the lock and returns a result.
+ * arch_spin_trylock() tries to get the lock and returns a result.
* On the m32r, the result value is 1 (= Success) or 0 (= Failure).
*/
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
int oldval;
unsigned long tmp1, tmp2;
@@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(arc
* }
*/
__asm__ __volatile__ (
- "# __raw_spin_trylock \n\t"
+ "# arch_spin_trylock \n\t"
"ldi %1, #0; \n\t"
"mvfc %2, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t"
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(arc
return (oldval > 0);
}
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp0, tmp1;
@@ -84,7 +84,7 @@ static inline void __raw_spin_lock(arch_
* }
*/
__asm__ __volatile__ (
- "# __raw_spin_lock \n\t"
+ "# arch_spin_lock \n\t"
".fillinsn \n"
"1: \n\t"
"mvfc %1, psw; \n\t"
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(arch_
);
}
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
mb();
lock->slock = 1;
@@ -319,8 +319,8 @@ static inline int __raw_write_trylock(ra
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_M32R_SPINLOCK_H */
Index: linux-2.6-tip/arch/mips/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/mips/include/asm/spinlock.h
+++ linux-2.6-tip/arch/mips/include/asm/spinlock.h
@@ -34,33 +34,33 @@
* becomes equal to the initial value of the tail.
*/
-static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
return ((counters >> 14) ^ counters) & 0x1fff;
}
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
- while (__raw_spin_is_locked(x)) { cpu_relax(); }
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ while (arch_spin_is_locked(x)) { cpu_relax(); }
-static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
return (((counters >> 14) - counters) & 0x1fff) > 1;
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
int my_ticket;
int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " .set push # __raw_spin_lock \n"
+ " .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
@@ -94,7 +94,7 @@ static inline void __raw_spin_lock(arch_
[my_ticket] "=&r" (my_ticket));
} else {
__asm__ __volatile__ (
- " .set push # __raw_spin_lock \n"
+ " .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(arch_
smp_llsc_mb();
}
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
int tmp;
@@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(arc
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " # __raw_spin_unlock \n"
+ " # arch_spin_unlock \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" addiu %[ticket], %[ticket], 1 \n"
" ori %[ticket], %[ticket], 0x2000 \n"
@@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(arc
[ticket] "=&r" (tmp));
} else {
__asm__ __volatile__ (
- " .set push # __raw_spin_unlock \n"
+ " .set push # arch_spin_unlock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(arc
}
}
-static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
int tmp, tmp2, tmp3;
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " .set push # __raw_spin_trylock \n"
+ " .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
@@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_tr
[now_serving] "=&r" (tmp3));
} else {
__asm__ __volatile__ (
- " .set push # __raw_spin_trylock \n"
+ " .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -483,8 +483,8 @@ static inline int __raw_write_trylock(ra
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_SPINLOCK_H */
Index: linux-2.6-tip/arch/parisc/include/asm/atomic.h
===================================================================
--- linux-2.6-tip.orig/arch/parisc/include/asm/atomic.h
+++ linux-2.6-tip/arch/parisc/include/asm/atomic.h
@@ -34,12 +34,12 @@ extern arch_spinlock_t __atomic_hash[ATO
#define _atomic_spin_lock_irqsave(l,f) do { \
arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
- __raw_spin_lock(s); \
+ arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
arch_spinlock_t *s = ATOMIC_HASH(l); \
- __raw_spin_unlock(s); \
+ arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
Index: linux-2.6-tip/arch/parisc/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/parisc/include/asm/spinlock.h
+++ linux-2.6-tip/arch/parisc/include/asm/spinlock.h
@@ -5,17 +5,17 @@
#include <asm/processor.h>
#include <asm/spinlock_types.h>
-static inline int __raw_spin_is_locked(arch_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
return *a == 0;
}
-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
-#define __raw_spin_unlock_wait(x) \
- do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+#define arch_spin_unlock_wait(x) \
+ do { cpu_relax(); } while (arch_spin_is_locked(x))
-static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags
mb();
}
-static inline void __raw_spin_unlock(arch_spinlock_t *x)
+static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(arc
mb();
}
-static inline int __raw_spin_trylock(arch_spinlock_t *x)
+static inline int arch_spin_trylock(arch_spinlock_t *x)
{
volatile unsigned int *a;
int ret;
@@ -73,9 +73,9 @@ static __inline__ void __raw_read_lock(
{
unsigned long flags;
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
rw->counter++;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
@@ -85,9 +85,9 @@ static __inline__ void __raw_read_unloc
{
unsigned long flags;
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
rw->counter--;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
@@ -98,9 +98,9 @@ static __inline__ int __raw_read_trylock
unsigned long flags;
retry:
local_irq_save(flags);
- if (__raw_spin_trylock(&rw->lock)) {
+ if (arch_spin_trylock(&rw->lock)) {
rw->counter++;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
return 1;
}
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock
return 0;
/* Wait until we have a realistic chance at the lock */
- while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
+ while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
cpu_relax();
goto retry;
@@ -124,10 +124,10 @@ static __inline__ void __raw_write_lock(
unsigned long flags;
retry:
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
if (rw->counter != 0) {
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
while (rw->counter != 0)
@@ -144,7 +144,7 @@ retry:
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
rw->counter = 0;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
}
/* Note that we have to ensure interrupts are disabled in case we're
@@ -155,13 +155,13 @@ static __inline__ int __raw_write_tryloc
int result = 0;
local_irq_save(flags);
- if (__raw_spin_trylock(&rw->lock)) {
+ if (arch_spin_trylock(&rw->lock)) {
if (rw->counter == 0) {
rw->counter = -1;
result = 1;
} else {
/* Read-locked. Oh well. */
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
}
}
local_irq_restore(flags);
@@ -190,8 +190,8 @@ static __inline__ int __raw_write_can_lo
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
Index: linux-2.6-tip/arch/powerpc/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/include/asm/spinlock.h
+++ linux-2.6-tip/arch/powerpc/include/asm/spinlock.h
@@ -28,7 +28,7 @@
#include <asm/asm-compat.h>
#include <asm/synch.h>
-#define __raw_spin_is_locked(x) ((x)->slock != 0)
+#define arch_spin_is_locked(x) ((x)->slock != 0)
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
@@ -54,7 +54,7 @@
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
*/
-static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
+static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, token;
@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_tr
return tmp;
}
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
- return arch_spin_trylock(lock) == 0;
+ return __arch_spin_trylock(lock) == 0;
}
/*
@@ -104,11 +104,11 @@ extern void __rw_yield(raw_rwlock_t *loc
#define SHARED_PROCESSOR 0
#endif
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
while (1) {
- if (likely(arch_spin_trylock(lock) == 0))
+ if (likely(__arch_spin_trylock(lock) == 0))
break;
do {
HMT_low();
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(arch_
}
static inline
-void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long flags_dis;
CLEAR_IO_SYNC;
while (1) {
- if (likely(arch_spin_trylock(lock) == 0))
+ if (likely(__arch_spin_trylock(lock) == 0))
break;
local_save_flags(flags_dis);
local_irq_restore(flags);
@@ -140,19 +140,19 @@ void __raw_spin_lock_flags(arch_spinlock
}
}
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
SYNC_IO;
- __asm__ __volatile__("# __raw_spin_unlock\n\t"
+ __asm__ __volatile__("# arch_spin_unlock\n\t"
LWSYNC_ON_SMP: : :"memory");
lock->slock = 0;
}
#ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(arch_spinlock_t *lock);
+extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
/*
@@ -290,9 +290,9 @@ static inline void __raw_write_unlock(ra
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
-#define _raw_spin_relax(lock) __spin_yield(lock)
-#define _raw_read_relax(lock) __rw_yield(lock)
-#define _raw_write_relax(lock) __rw_yield(lock)
+#define arch_spin_relax(lock) __spin_yield(lock)
+#define arch_read_relax(lock) __rw_yield(lock)
+#define arch_write_relax(lock) __rw_yield(lock)
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
Index: linux-2.6-tip/arch/powerpc/kernel/rtas.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/kernel/rtas.c
+++ linux-2.6-tip/arch/powerpc/kernel/rtas.c
@@ -80,13 +80,13 @@ static unsigned long lock_rtas(void)
local_irq_save(flags);
preempt_disable();
- __raw_spin_lock_flags(&rtas.lock, flags);
+ arch_spin_lock_flags(&rtas.lock, flags);
return flags;
}
static void unlock_rtas(unsigned long flags)
{
- __raw_spin_unlock(&rtas.lock);
+ arch_spin_unlock(&rtas.lock);
local_irq_restore(flags);
preempt_enable();
}
@@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void)
local_irq_save(flags);
hard_irq_disable();
- __raw_spin_lock(&timebase_lock);
+ arch_spin_lock(&timebase_lock);
rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
timebase = get_tb();
- __raw_spin_unlock(&timebase_lock);
+ arch_spin_unlock(&timebase_lock);
while (timebase)
barrier();
@@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void)
{
while (!timebase)
barrier();
- __raw_spin_lock(&timebase_lock);
+ arch_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
- __raw_spin_unlock(&timebase_lock);
+ arch_spin_unlock(&timebase_lock);
}
Index: linux-2.6-tip/arch/powerpc/lib/locks.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/lib/locks.c
+++ linux-2.6-tip/arch/powerpc/lib/locks.c
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
}
#endif
-void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (lock->slock) {
HMT_low();
@@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(arch_spinloc
HMT_medium();
}
-EXPORT_SYMBOL(__raw_spin_unlock_wait);
+EXPORT_SYMBOL(arch_spin_unlock_wait);
Index: linux-2.6-tip/arch/powerpc/platforms/pasemi/setup.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/platforms/pasemi/setup.c
+++ linux-2.6-tip/arch/powerpc/platforms/pasemi/setup.c
@@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(
local_irq_save(flags);
hard_irq_disable();
- __raw_spin_lock(&timebase_lock);
+ arch_spin_lock(&timebase_lock);
mtspr(SPRN_TBCTL, TBCTL_FREEZE);
isync();
timebase = get_tb();
- __raw_spin_unlock(&timebase_lock);
+ arch_spin_unlock(&timebase_lock);
while (timebase)
barrier();
@@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(
while (!timebase)
smp_rmb();
- __raw_spin_lock(&timebase_lock);
+ arch_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
- __raw_spin_unlock(&timebase_lock);
+ arch_spin_unlock(&timebase_lock);
}
struct smp_ops_t pas_smp_ops = {
Index: linux-2.6-tip/arch/s390/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/s390/include/asm/spinlock.h
+++ linux-2.6-tip/arch/s390/include/asm/spinlock.h
@@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned
* (the type definitions are in asm/spinlock_types.h)
*/
-#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) \
- _raw_spin_relax(lock); } while (0)
-
-extern void _raw_spin_lock_wait(arch_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(arch_spinlock_t *);
-extern void _raw_spin_relax(arch_spinlock_t *lock);
+#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) \
+ arch_spin_relax(lock); } while (0)
+
+extern void arch_spin_lock_wait(arch_spinlock_t *);
+extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int arch_spin_trylock_retry(arch_spinlock_t *);
+extern void arch_spin_relax(arch_spinlock_t *lock);
-static inline void __raw_spin_lock(arch_spinlock_t *lp)
+static inline void arch_spin_lock(arch_spinlock_t *lp)
{
int old;
old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
if (likely(old == 0))
return;
- _raw_spin_lock_wait(lp);
+ arch_spin_lock_wait(lp);
}
-static inline void __raw_spin_lock_flags(arch_spinlock_t *lp,
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
unsigned long flags)
{
int old;
@@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags
old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
if (likely(old == 0))
return;
- _raw_spin_lock_wait_flags(lp, flags);
+ arch_spin_lock_wait_flags(lp, flags);
}
-static inline int __raw_spin_trylock(arch_spinlock_t *lp)
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
int old;
old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
if (likely(old == 0))
return 1;
- return _raw_spin_trylock_retry(lp);
+ return arch_spin_trylock_retry(lp);
}
-static inline void __raw_spin_unlock(arch_spinlock_t *lp)
+static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
@@ -188,7 +188,7 @@ static inline int __raw_write_trylock(ra
return _raw_write_trylock_retry(rw);
}
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
Index: linux-2.6-tip/arch/s390/lib/spinlock.c
===================================================================
--- linux-2.6-tip.orig/arch/s390/lib/spinlock.c
+++ linux-2.6-tip/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cp
_raw_yield();
}
-void _raw_spin_lock_wait(arch_spinlock_t *lp)
+void arch_spin_lock_wait(arch_spinlock_t *lp)
{
int count = spin_retry;
unsigned int cpu = ~smp_processor_id();
@@ -51,15 +51,15 @@ void _raw_spin_lock_wait(arch_spinlock_t
_raw_yield_cpu(~owner);
count = spin_retry;
}
- if (__raw_spin_is_locked(lp))
+ if (arch_spin_is_locked(lp))
continue;
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
return;
}
}
-EXPORT_SYMBOL(_raw_spin_lock_wait);
+EXPORT_SYMBOL(arch_spin_lock_wait);
-void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
int count = spin_retry;
unsigned int cpu = ~smp_processor_id();
@@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(arch_spin
_raw_yield_cpu(~owner);
count = spin_retry;
}
- if (__raw_spin_is_locked(lp))
+ if (arch_spin_is_locked(lp))
continue;
local_irq_disable();
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
@@ -80,30 +80,30 @@ void _raw_spin_lock_wait_flags(arch_spin
local_irq_restore(flags);
}
}
-EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
-int _raw_spin_trylock_retry(arch_spinlock_t *lp)
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
unsigned int cpu = ~smp_processor_id();
int count;
for (count = spin_retry; count > 0; count--) {
- if (__raw_spin_is_locked(lp))
+ if (arch_spin_is_locked(lp))
continue;
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
return 1;
}
return 0;
}
-EXPORT_SYMBOL(_raw_spin_trylock_retry);
+EXPORT_SYMBOL(arch_spin_trylock_retry);
-void _raw_spin_relax(arch_spinlock_t *lock)
+void arch_spin_relax(arch_spinlock_t *lock)
{
unsigned int cpu = lock->owner_cpu;
if (cpu != 0)
_raw_yield_cpu(~cpu);
}
-EXPORT_SYMBOL(_raw_spin_relax);
+EXPORT_SYMBOL(arch_spin_relax);
void _raw_read_lock_wait(raw_rwlock_t *rw)
{
Index: linux-2.6-tip/arch/sh/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/sh/include/asm/spinlock.h
+++ linux-2.6-tip/arch/sh/include/asm/spinlock.h
@@ -23,10 +23,10 @@
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*/
-#define __raw_spin_is_locked(x) ((x)->lock <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
- do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->lock <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
@@ -34,14 +34,14 @@
*
* We make no fairness assumptions. They have a cost.
*/
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
unsigned long oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_spin_lock \n\t"
+ "movli.l @%2, %0 ! arch_spin_lock \n\t"
"mov %0, %1 \n\t"
"mov #0, %0 \n\t"
"movco.l %0, @%2 \n\t"
@@ -54,12 +54,12 @@ static inline void __raw_spin_lock(arch_
);
}
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__ (
- "mov #1, %0 ! __raw_spin_unlock \n\t"
+ "mov #1, %0 ! arch_spin_unlock \n\t"
"mov.l %0, @%1 \n\t"
: "=&z" (tmp)
: "r" (&lock->lock)
@@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(arc
);
}
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_spin_trylock \n\t"
+ "movli.l @%2, %0 ! arch_spin_trylock \n\t"
"mov %0, %1 \n\t"
"mov #0, %0 \n\t"
"movco.l %0, @%2 \n\t"
@@ -219,8 +219,8 @@ static inline int __raw_write_trylock(ra
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SH_SPINLOCK_H */
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_32.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_32.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_32.h
@@ -10,12 +10,12 @@
#include <asm/psr.h>
-#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
+#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(arch_
: "g2", "memory", "cc");
}
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int result;
__asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(arc
return (result == 0);
}
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
@@ -176,13 +176,13 @@ static inline int arch_read_trylock(raw_
#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
#define __raw_write_can_lock(rw) (!(rw)->lock)
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_64.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_64.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_64.h
@@ -21,13 +21,13 @@
* the spinner sections must be pre-V9 branches.
*/
-#define __raw_spin_is_locked(lp) ((lp)->lock != 0)
+#define arch_spin_is_locked(lp) ((lp)->lock != 0)
-#define __raw_spin_unlock_wait(lp) \
+#define arch_spin_unlock_wait(lp) \
do { rmb(); \
} while((lp)->lock)
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(arch_
: "memory");
}
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long result;
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(arc
return (result == 0UL);
}
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
" stb %%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(arc
: "memory");
}
-static inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long tmp1, tmp2;
@@ -222,9 +222,9 @@ static int inline arch_write_trylock(raw
#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw) (!(rw)->lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* !(__ASSEMBLY__) */
Index: linux-2.6-tip/arch/x86/include/asm/paravirt.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/paravirt.h
+++ linux-2.6-tip/arch/x86/include/asm/paravirt.h
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-static inline int __raw_spin_is_locked(struct arch_spinlock *lock)
+static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}
-static inline int __raw_spin_is_contended(struct arch_spinlock *lock)
+static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static __always_inline void __raw_spin_lock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}
-static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock,
+static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
unsigned long flags)
{
PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}
-static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock)
+static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}
-static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}
Index: linux-2.6-tip/arch/x86/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/spinlock.h
+++ linux-2.6-tip/arch/x86/include/asm/spinlock.h
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_conte
#ifndef CONFIG_PARAVIRT_SPINLOCKS
-static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}
-static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}
-static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}
-static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
-static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
+ while (arch_spin_is_locked(lock))
cpu_relax();
}
@@ -298,9 +298,9 @@ static inline void __raw_write_unlock(ra
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
Index: linux-2.6-tip/arch/x86/kernel/dumpstack.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/kernel/dumpstack.c
+++ linux-2.6-tip/arch/x86/kernel/dumpstack.c
@@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void)
/* racy, but better than risking deadlock. */
raw_local_irq_save(flags);
cpu = smp_processor_id();
- if (!__raw_spin_trylock(&die_lock)) {
+ if (!arch_spin_trylock(&die_lock)) {
if (cpu == die_owner)
/* nested oops. should stop eventually */;
else
- __raw_spin_lock(&die_lock);
+ arch_spin_lock(&die_lock);
}
die_nest_count++;
die_owner = cpu;
@@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long fl
die_nest_count--;
if (!die_nest_count)
/* Nest count reaches zero, release the lock. */
- __raw_spin_unlock(&die_lock);
+ arch_spin_unlock(&die_lock);
raw_local_irq_restore(flags);
oops_exit();
Index: linux-2.6-tip/arch/x86/kernel/paravirt-spinlocks.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/kernel/paravirt-spinlocks.c
+++ linux-2.6-tip/arch/x86/kernel/paravirt-spinlocks.c
@@ -10,7 +10,7 @@
static inline void
default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
struct pv_lock_ops pv_lock_ops = {
Index: linux-2.6-tip/arch/x86/kernel/tsc_sync.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/kernel/tsc_sync.c
+++ linux-2.6-tip/arch/x86/kernel/tsc_sync.c
@@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(voi
* previous TSC that was measured (possibly on
* another CPU) and update the previous TSC timestamp.
*/
- __raw_spin_lock(&sync_lock);
+ arch_spin_lock(&sync_lock);
prev = last_tsc;
rdtsc_barrier();
now = get_cycles();
rdtsc_barrier();
last_tsc = now;
- __raw_spin_unlock(&sync_lock);
+ arch_spin_unlock(&sync_lock);
/*
* Be nice every now and then (and also check whether
@@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(voi
* we saw a time-warp of the TSC going backwards:
*/
if (unlikely(prev > now)) {
- __raw_spin_lock(&sync_lock);
+ arch_spin_lock(&sync_lock);
max_warp = max(max_warp, prev - now);
nr_warps++;
- __raw_spin_unlock(&sync_lock);
+ arch_spin_unlock(&sync_lock);
}
}
WARN(!(now-start),
Index: linux-2.6-tip/include/asm-generic/bitops/atomic.h
===================================================================
--- linux-2.6-tip.orig/include/asm-generic/bitops/atomic.h
+++ linux-2.6-tip/include/asm-generic/bitops/atomic.h
@@ -22,12 +22,12 @@ extern arch_spinlock_t __atomic_hash[ATO
#define _atomic_spin_lock_irqsave(l,f) do { \
arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
- __raw_spin_lock(s); \
+ arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
arch_spinlock_t *s = ATOMIC_HASH(l); \
- __raw_spin_unlock(s); \
+ arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
Index: linux-2.6-tip/include/linux/spinlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock.h
+++ linux-2.6-tip/include/linux/spinlock.h
@@ -14,7 +14,7 @@
* linux/spinlock_types.h:
* defines the generic type and initializers
*
- * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
+ * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
* defines the generic type and initializers
*
* linux/spinlock_up.h:
- * contains the __raw_spin_*()/etc. version of UP
+ * contains the arch_spin_*()/etc. version of UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
@@ -103,17 +103,17 @@ do { \
do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
-#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
+#define spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
#define spin_is_contended(lock) ((lock)->break_lock)
#else
-#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
+#ifdef arch_spin_is_contended
+#define spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
#define spin_is_contended(lock) (((void)(lock), 0))
-#endif /*__raw_spin_is_contended*/
+#endif /*arch_spin_is_contended*/
#endif
/* The lock does not imply full memory barrier. */
@@ -125,7 +125,7 @@ static inline void smp_mb__after_lock(vo
* spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
*/
-#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
+#define spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
extern void _raw_spin_lock(spinlock_t *lock);
@@ -133,11 +133,11 @@ static inline void smp_mb__after_lock(vo
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_unlock(spinlock_t *lock);
#else
-# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
+# define _raw_spin_lock(lock) arch_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
- __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
+ arch_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_spin_trylock(lock) arch_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_unlock(lock) arch_spin_unlock(&(lock)->raw_lock)
#endif
/*
Index: linux-2.6-tip/include/linux/spinlock_up.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_up.h
+++ linux-2.6-tip/include/linux/spinlock_up.h
@@ -18,21 +18,21 @@
*/
#ifdef CONFIG_DEBUG_SPINLOCK
-#define __raw_spin_is_locked(x) ((x)->slock == 0)
+#define arch_spin_is_locked(x) ((x)->slock == 0)
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
}
static inline void
-__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
local_irq_save(flags);
lock->slock = 0;
}
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
char oldval = lock->slock;
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(arc
return oldval > 0;
}
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
lock->slock = 1;
}
@@ -57,20 +57,20 @@ static inline void __raw_spin_unlock(arc
#define __raw_write_unlock(lock) do { (void)(lock); } while (0)
#else /* DEBUG_SPINLOCK */
-#define __raw_spin_is_locked(lock) ((void)(lock), 0)
+#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
-# define __raw_spin_lock(lock) do { (void)(lock); } while (0)
-# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
-# define __raw_spin_unlock(lock) do { (void)(lock); } while (0)
-# define __raw_spin_trylock(lock) ({ (void)(lock); 1; })
+# define arch_spin_lock(lock) do { (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
+# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
+# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
-#define __raw_spin_is_contended(lock) (((void)(lock), 0))
+#define arch_spin_is_contended(lock) (((void)(lock), 0))
#define __raw_read_can_lock(lock) (((void)(lock), 1))
#define __raw_write_can_lock(lock) (((void)(lock), 1))
-#define __raw_spin_unlock_wait(lock) \
- do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+#define arch_spin_unlock_wait(lock) \
+ do { cpu_relax(); } while (arch_spin_is_locked(lock))
#endif /* __LINUX_SPINLOCK_UP_H */
Index: linux-2.6-tip/kernel/lockdep.c
===================================================================
--- linux-2.6-tip.orig/kernel/lockdep.c
+++ linux-2.6-tip/kernel/lockdep.c
@@ -77,7 +77,7 @@ static arch_spinlock_t lockdep_lock = (a
static int graph_lock(void)
{
- __raw_spin_lock(&lockdep_lock);
+ arch_spin_lock(&lockdep_lock);
/*
* Make sure that if another CPU detected a bug while
* walking the graph we dont change it (while the other
@@ -85,7 +85,7 @@ static int graph_lock(void)
* dropped already)
*/
if (!debug_locks) {
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
return 0;
}
/* prevent any recursions within lockdep from causing deadlocks */
@@ -95,11 +95,11 @@ static int graph_lock(void)
static inline int graph_unlock(void)
{
- if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+ if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
return DEBUG_LOCKS_WARN_ON(1);
current->lockdep_recursion--;
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
return 0;
}
@@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_
{
int ret = debug_locks_off();
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
return ret;
}
@@ -1161,9 +1161,9 @@ unsigned long lockdep_count_forward_deps
this.class = class;
local_irq_save(flags);
- __raw_spin_lock(&lockdep_lock);
+ arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
return ret;
@@ -1188,9 +1188,9 @@ unsigned long lockdep_count_backward_dep
this.class = class;
local_irq_save(flags);
- __raw_spin_lock(&lockdep_lock);
+ arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
return ret;
Index: linux-2.6-tip/kernel/mutex-debug.h
===================================================================
--- linux-2.6-tip.orig/kernel/mutex-debug.h
+++ linux-2.6-tip/kernel/mutex-debug.h
@@ -43,13 +43,13 @@ static inline void mutex_clear_owner(str
\
DEBUG_LOCKS_WARN_ON(in_interrupt()); \
local_irq_save(flags); \
- __raw_spin_lock(&(lock)->raw_lock); \
+ arch_spin_lock(&(lock)->raw_lock); \
DEBUG_LOCKS_WARN_ON(l->magic != l); \
} while (0)
#define spin_unlock_mutex(lock, flags) \
do { \
- __raw_spin_unlock(&(lock)->raw_lock); \
+ arch_spin_unlock(&(lock)->raw_lock); \
local_irq_restore(flags); \
preempt_check_resched(); \
} while (0)
Index: linux-2.6-tip/kernel/spinlock.c
===================================================================
--- linux-2.6-tip.orig/kernel/spinlock.c
+++ linux-2.6-tip/kernel/spinlock.c
@@ -53,7 +53,7 @@ void __lockfunc __##op##_lock(locktype##
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
while (!op##_can_lock(lock) && (lock)->break_lock) \
- _raw_##op##_relax(&lock->raw_lock); \
+ arch_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
} \
@@ -73,7 +73,7 @@ unsigned long __lockfunc __##op##_lock_i
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
while (!op##_can_lock(lock) && (lock)->break_lock) \
- _raw_##op##_relax(&lock->raw_lock); \
+ arch_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
return flags; \
Index: linux-2.6-tip/kernel/trace/ring_buffer.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/ring_buffer.c
+++ linux-2.6-tip/kernel/trace/ring_buffer.c
@@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_pe
int ret;
local_irq_save(flags);
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
again:
/*
@@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_pe
goto again;
out:
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
return reader;
@@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffe
synchronize_sched();
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return iter;
@@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_b
if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
goto out;
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
rb_reset_cpu(cpu_buffer);
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Index: linux-2.6-tip/kernel/trace/trace.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace.c
+++ linux-2.6-tip/kernel/trace/trace.c
@@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, st
return;
WARN_ON_ONCE(!irqs_disabled());
- __raw_spin_lock(&ftrace_max_lock);
+ arch_spin_lock(&ftrace_max_lock);
tr->buffer = max_tr.buffer;
max_tr.buffer = buf;
__update_max_tr(tr, tsk, cpu);
- __raw_spin_unlock(&ftrace_max_lock);
+ arch_spin_unlock(&ftrace_max_lock);
}
/**
@@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array
return;
WARN_ON_ONCE(!irqs_disabled());
- __raw_spin_lock(&ftrace_max_lock);
+ arch_spin_lock(&ftrace_max_lock);
ftrace_disable_cpu();
@@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array
WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
__update_max_tr(tr, tsk, cpu);
- __raw_spin_unlock(&ftrace_max_lock);
+ arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
@@ -915,7 +915,7 @@ static void trace_save_cmdline(struct ta
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
*/
- if (!__raw_spin_trylock(&trace_cmdline_lock))
+ if (!arch_spin_trylock(&trace_cmdline_lock))
return;
idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +940,7 @@ static void trace_save_cmdline(struct ta
memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
- __raw_spin_unlock(&trace_cmdline_lock);
+ arch_spin_unlock(&trace_cmdline_lock);
}
void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char co
}
preempt_disable();
- __raw_spin_lock(&trace_cmdline_lock);
+ arch_spin_lock(&trace_cmdline_lock);
map = map_pid_to_cmdline[pid];
if (map != NO_CMDLINE_MAP)
strcpy(comm, saved_cmdlines[map]);
else
strcpy(comm, "<...>");
- __raw_spin_unlock(&trace_cmdline_lock);
+ arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
}
@@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, con
/* Lockdep uses trace_printk for lock tracing */
local_irq_save(flags);
- __raw_spin_lock(&trace_buf_lock);
+ arch_spin_lock(&trace_buf_lock);
len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, con
ring_buffer_unlock_commit(buffer, event);
out_unlock:
- __raw_spin_unlock(&trace_buf_lock);
+ arch_spin_unlock(&trace_buf_lock);
local_irq_restore(flags);
out:
@@ -1360,7 +1360,7 @@ int trace_array_vprintk(struct trace_arr
pause_graph_tracing();
raw_local_irq_save(irq_flags);
- __raw_spin_lock(&trace_buf_lock);
+ arch_spin_lock(&trace_buf_lock);
if (args == NULL) {
strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
len = strlen(trace_buf);
@@ -1382,7 +1382,7 @@ int trace_array_vprintk(struct trace_arr
ring_buffer_unlock_commit(buffer, event);
out_unlock:
- __raw_spin_unlock(&trace_buf_lock);
+ arch_spin_unlock(&trace_buf_lock);
raw_local_irq_restore(irq_flags);
unpause_graph_tracing();
out:
@@ -2254,7 +2254,7 @@ tracing_cpumask_write(struct file *filp,
mutex_lock(&tracing_cpumask_update_lock);
local_irq_disable();
- __raw_spin_lock(&ftrace_max_lock);
+ arch_spin_lock(&ftrace_max_lock);
for_each_tracing_cpu(cpu) {
/*
* Increase/decrease the disabled counter if we are
@@ -2269,7 +2269,7 @@ tracing_cpumask_write(struct file *filp,
atomic_dec(&global_trace.data[cpu]->disabled);
}
}
- __raw_spin_unlock(&ftrace_max_lock);
+ arch_spin_unlock(&ftrace_max_lock);
local_irq_enable();
cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -4279,7 +4279,7 @@ static void __ftrace_dump(bool disable_t
/* only one dump */
local_irq_save(flags);
- __raw_spin_lock(&ftrace_dump_lock);
+ arch_spin_lock(&ftrace_dump_lock);
if (dump_ran)
goto out;
@@ -4354,7 +4354,7 @@ static void __ftrace_dump(bool disable_t
}
out:
- __raw_spin_unlock(&ftrace_dump_lock);
+ arch_spin_unlock(&ftrace_dump_lock);
local_irq_restore(flags);
}
Index: linux-2.6-tip/kernel/trace/trace_clock.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_clock.c
+++ linux-2.6-tip/kernel/trace/trace_clock.c
@@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void)
if (unlikely(in_nmi()))
goto out;
- __raw_spin_lock(&trace_clock_struct.lock);
+ arch_spin_lock(&trace_clock_struct.lock);
/*
* TODO: if this happens often then maybe we should reset
@@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void)
trace_clock_struct.prev_time = now;
- __raw_spin_unlock(&trace_clock_struct.lock);
+ arch_spin_unlock(&trace_clock_struct.lock);
out:
raw_local_irq_restore(flags);
Index: linux-2.6-tip/kernel/trace/trace_sched_wakeup.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_sched_wakeup.c
+++ linux-2.6-tip/kernel/trace/trace_sched_wakeup.c
@@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq,
goto out;
local_irq_save(flags);
- __raw_spin_lock(&wakeup_lock);
+ arch_spin_lock(&wakeup_lock);
/* We could race with grabbing wakeup_lock */
if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq,
out_unlock:
__wakeup_reset(wakeup_trace);
- __raw_spin_unlock(&wakeup_lock);
+ arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags);
out:
atomic_dec(&wakeup_trace->data[cpu]->disabled);
@@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_ar
tracing_reset_online_cpus(tr);
local_irq_save(flags);
- __raw_spin_lock(&wakeup_lock);
+ arch_spin_lock(&wakeup_lock);
__wakeup_reset(tr);
- __raw_spin_unlock(&wakeup_lock);
+ arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags);
}
@@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_
goto out;
/* interrupts should be off from try_to_wake_up */
- __raw_spin_lock(&wakeup_lock);
+ arch_spin_lock(&wakeup_lock);
/* check for races. */
if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_
trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
out_locked:
- __raw_spin_unlock(&wakeup_lock);
+ arch_spin_unlock(&wakeup_lock);
out:
atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
Index: linux-2.6-tip/kernel/trace/trace_selftest.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_selftest.c
+++ linux-2.6-tip/kernel/trace/trace_selftest.c
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trac
/* Don't allow flipping of max traces now */
local_irq_save(flags);
- __raw_spin_lock(&ftrace_max_lock);
+ arch_spin_lock(&ftrace_max_lock);
cnt = ring_buffer_entries(tr->buffer);
@@ -85,7 +85,7 @@ static int trace_test_buffer(struct trac
break;
}
tracing_on();
- __raw_spin_unlock(&ftrace_max_lock);
+ arch_spin_unlock(&ftrace_max_lock);
local_irq_restore(flags);
if (count)
Index: linux-2.6-tip/kernel/trace/trace_stack.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_stack.c
+++ linux-2.6-tip/kernel/trace/trace_stack.c
@@ -54,7 +54,7 @@ static inline void check_stack(void)
return;
local_irq_save(flags);
- __raw_spin_lock(&max_stack_lock);
+ arch_spin_lock(&max_stack_lock);
/* a race could have already updated it */
if (this_size <= max_stack_size)
@@ -103,7 +103,7 @@ static inline void check_stack(void)
}
out:
- __raw_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&max_stack_lock);
local_irq_restore(flags);
}
@@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp,
return ret;
local_irq_save(flags);
- __raw_spin_lock(&max_stack_lock);
+ arch_spin_lock(&max_stack_lock);
*ptr = val;
- __raw_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&max_stack_lock);
local_irq_restore(flags);
return count;
@@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff
static void *t_start(struct seq_file *m, loff_t *pos)
{
local_irq_disable();
- __raw_spin_lock(&max_stack_lock);
+ arch_spin_lock(&max_stack_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
@@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m,
static void t_stop(struct seq_file *m, void *p)
{
- __raw_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&max_stack_lock);
local_irq_enable();
}
Index: linux-2.6-tip/lib/spinlock_debug.c
===================================================================
--- linux-2.6-tip.orig/lib/spinlock_debug.c
+++ linux-2.6-tip/lib/spinlock_debug.c
@@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_spin_trylock(&lock->raw_lock))
+ if (arch_spin_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -128,14 +128,14 @@ static void __spin_lock_debug(spinlock_t
void _raw_spin_lock(spinlock_t *lock)
{
debug_spin_lock_before(lock);
- if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
+ if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
__spin_lock_debug(lock);
debug_spin_lock_after(lock);
}
int _raw_spin_trylock(spinlock_t *lock)
{
- int ret = __raw_spin_trylock(&lock->raw_lock);
+ int ret = arch_spin_trylock(&lock->raw_lock);
if (ret)
debug_spin_lock_after(lock);
@@ -151,7 +151,7 @@ int _raw_spin_trylock(spinlock_t *lock)
void _raw_spin_unlock(spinlock_t *lock)
{
debug_spin_unlock(lock);
- __raw_spin_unlock(&lock->raw_lock);
+ arch_spin_unlock(&lock->raw_lock);
}
static void rwlock_bug(rwlock_t *lock, const char *msg)
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 07/23] locking: Convert raw_rwlock to arch_rwlock
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (5 preceding siblings ...)
2009-12-06 18:02 ` [patch 06/23] locking: Convert __raw_spin* functions to arch_spin* Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` [patch 08/23] locking: Convert raw_rwlock functions " Thomas Gleixner
` (15 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-convert-rwlock-arch.patch --]
[-- Type: text/plain, Size: 43218 bytes --]
Not strictly necessary for -rt, as -rt does not have non-sleeping
rwlocks, but it is odd not to have a consistent naming convention.
No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
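[Editor's note: a minimal sketch, not part of the patch, of how the rwlock
type layering reads once the rename is applied: each architecture provides
arch_rwlock_t and __ARCH_RW_LOCK_UNLOCKED, and the generic rwlock_t wraps
them. Structure and field names mirror the alpha and generic hunks below;
the lockdep/debug fields are omitted and everything else is illustrative.]

	/* asm/spinlock_types.h (per architecture) */
	typedef struct {
		volatile unsigned int lock;
	} arch_rwlock_t;

	#define __ARCH_RW_LOCK_UNLOCKED	{ 0 }

	/* linux/rwlock_types.h (generic layer) */
	typedef struct {
		arch_rwlock_t raw_lock;
		/* lockdep/debug fields omitted */
	} rwlock_t;

	#define __RW_LOCK_UNLOCKED(lockname) \
		(rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED }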
---
arch/alpha/include/asm/spinlock.h | 16 ++++++++--------
arch/alpha/include/asm/spinlock_types.h | 4 ++--
arch/arm/include/asm/spinlock.h | 12 ++++++------
arch/arm/include/asm/spinlock_types.h | 4 ++--
arch/blackfin/include/asm/spinlock.h | 16 ++++++++--------
arch/blackfin/include/asm/spinlock_types.h | 4 ++--
arch/cris/include/arch-v32/arch/spinlock.h | 16 ++++++++--------
arch/ia64/include/asm/spinlock.h | 16 ++++++++--------
arch/ia64/include/asm/spinlock_types.h | 4 ++--
arch/m32r/include/asm/spinlock.h | 12 ++++++------
arch/m32r/include/asm/spinlock_types.h | 4 ++--
arch/mips/include/asm/spinlock.h | 12 ++++++------
arch/mips/include/asm/spinlock_types.h | 4 ++--
arch/parisc/include/asm/spinlock.h | 16 ++++++++--------
arch/parisc/include/asm/spinlock_types.h | 4 ++--
arch/powerpc/include/asm/spinlock.h | 18 +++++++++---------
arch/powerpc/include/asm/spinlock_types.h | 4 ++--
arch/powerpc/lib/locks.c | 2 +-
arch/s390/include/asm/spinlock.h | 28 ++++++++++++++--------------
arch/s390/include/asm/spinlock_types.h | 4 ++--
arch/s390/lib/spinlock.c | 12 ++++++------
arch/sh/include/asm/spinlock.h | 12 ++++++------
arch/sh/include/asm/spinlock_types.h | 4 ++--
arch/sparc/include/asm/spinlock_32.h | 20 ++++++++++----------
arch/sparc/include/asm/spinlock_64.h | 12 ++++++------
arch/sparc/include/asm/spinlock_types.h | 4 ++--
arch/x86/include/asm/spinlock.h | 16 ++++++++--------
arch/x86/include/asm/spinlock_types.h | 4 ++--
include/linux/rwlock.h | 2 +-
include/linux/rwlock_types.h | 6 +++---
include/linux/spinlock.h | 4 ++--
include/linux/spinlock_types_up.h | 4 ++--
lib/spinlock_debug.c | 2 +-
33 files changed, 151 insertions(+), 151 deletions(-)
Index: linux-2.6-tip/arch/alpha/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/alpha/include/asm/spinlock.h
+++ linux-2.6-tip/arch/alpha/include/asm/spinlock.h
@@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch
/***********************************************************/
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int __raw_read_can_lock(arch_rwlock_t *lock)
{
return (lock->lock & 1) == 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int __raw_write_can_lock(arch_rwlock_t *lock)
{
return lock->lock == 0;
}
-static inline void __raw_read_lock(raw_rwlock_t *lock)
+static inline void __raw_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_r
: "m" (*lock) : "memory");
}
-static inline void __raw_write_lock(raw_rwlock_t *lock)
+static inline void __raw_write_lock(arch_rwlock_t *lock)
{
long regx;
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_
: "m" (*lock) : "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t * lock)
+static inline int __raw_read_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw
return success;
}
-static inline int __raw_write_trylock(raw_rwlock_t * lock)
+static inline int __raw_write_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(ra
return success;
}
-static inline void __raw_read_unlock(raw_rwlock_t * lock)
+static inline void __raw_read_unlock(arch_rwlock_t * lock)
{
long regx;
__asm__ __volatile__(
@@ -160,7 +160,7 @@ static inline void __raw_read_unlock(raw
: "m" (*lock) : "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t * lock)
+static inline void __raw_write_unlock(arch_rwlock_t * lock)
{
mb();
lock->lock = 0;
Index: linux-2.6-tip/arch/alpha/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/alpha/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/alpha/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
Index: linux-2.6-tip/arch/arm/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/arm/include/asm/spinlock.h
+++ linux-2.6-tip/arch/arm/include/asm/spinlock.h
@@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch
* just write zero since the lock is exclusively held.
*/
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_
smp_mb();
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(ra
}
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(ra
* currently active. However, we know we won't have any write
* locks.
*/
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_r
smp_mb();
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw
: "cc");
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2 = 1;
Index: linux-2.6-tip/arch/arm/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/arm/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/arm/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
Index: linux-2.6-tip/arch/blackfin/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/include/asm/spinlock.h
+++ linux-2.6-tip/arch/blackfin/include/asm/spinlock.h
@@ -52,42 +52,42 @@ static inline void arch_spin_unlock_wait
cpu_relax();
}
-static inline int __raw_read_can_lock(raw_rwlock_t *rw)
+static inline int __raw_read_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) > 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *rw)
+static inline int __raw_write_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
__raw_read_lock_asm(&rw->lock);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
return __raw_read_trylock_asm(&rw->lock);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
__raw_read_unlock_asm(&rw->lock);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
__raw_write_lock_asm(&rw->lock);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
return __raw_write_trylock_asm(&rw->lock);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
__raw_write_unlock_asm(&rw->lock);
}
Index: linux-2.6-tip/arch/blackfin/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/blackfin/include/asm/spinlock_types.h
@@ -21,8 +21,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif
Index: linux-2.6-tip/arch/cris/include/arch-v32/arch/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/cris/include/arch-v32/arch/spinlock.h
+++ linux-2.6-tip/arch/cris/include/arch-v32/arch/spinlock.h
@@ -56,17 +56,17 @@ arch_spin_lock_flags(arch_spinlock_t *lo
*
*/
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int __raw_read_can_lock(arch_rwlock_t *x)
{
return (int)(x)->lock > 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int __raw_write_can_lock(arch_rwlock_t *x)
{
return (x)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock == 0);
@@ -74,7 +74,7 @@ static inline void __raw_read_lock(raw_
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
@@ -82,14 +82,14 @@ static inline void __raw_write_lock(raw
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
rw->lock++;
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
@@ -97,7 +97,7 @@ static inline void __raw_write_unlock(r
arch_spin_unlock(&rw->slock);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
@@ -109,7 +109,7 @@ static inline int __raw_read_trylock(ra
return ret;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
Index: linux-2.6-tip/arch/ia64/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/ia64/include/asm/spinlock.h
+++ linux-2.6-tip/arch/ia64/include/asm/spinlock.h
@@ -146,7 +146,7 @@ static inline void arch_spin_unlock_wait
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1,%2\n"
@@ -177,7 +177,7 @@ __raw_read_lock_flags(raw_rwlock_t *lock
#define __raw_read_lock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
\
while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -190,14 +190,14 @@ do { \
#define __raw_read_unlock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1, %2\n"
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *loc
(result == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void __raw_write_unlock(arch_rwlock_t *x)
{
u8 *y = (u8 *)x;
barrier();
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(ra
(ia64_val == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void __raw_write_unlock(arch_rwlock_t *x)
{
barrier();
x->write_lock = 0;
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(ra
#endif /* !ASM_SUPPORTED */
-static inline int __raw_read_trylock(raw_rwlock_t *x)
+static inline int __raw_read_trylock(arch_rwlock_t *x)
{
union {
- raw_rwlock_t lock;
+ arch_rwlock_t lock;
__u32 word;
} old, new;
old.lock = new.lock = *x;
Index: linux-2.6-tip/arch/ia64/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/ia64/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/ia64/include/asm/spinlock_types.h
@@ -14,8 +14,8 @@ typedef struct {
typedef struct {
volatile unsigned int read_counter : 31;
volatile unsigned int write_lock : 1;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 }
#endif
Index: linux-2.6-tip/arch/m32r/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/m32r/include/asm/spinlock.h
+++ linux-2.6-tip/arch/m32r/include/asm/spinlock.h
@@ -148,7 +148,7 @@ static inline void arch_spin_unlock(arch
*/
#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_r
);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_
);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw
);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(ra
);
}
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int __raw_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t*)lock;
if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw
return 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int __raw_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
Index: linux-2.6-tip/arch/m32r/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/m32r/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/m32r/include/asm/spinlock_types.h
@@ -13,11 +13,11 @@ typedef struct {
typedef struct {
volatile int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_M32R_SPINLOCK_TYPES_H */
Index: linux-2.6-tip/arch/mips/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/mips/include/asm/spinlock.h
+++ linux-2.6-tip/arch/mips/include/asm/spinlock.h
@@ -256,7 +256,7 @@ static inline unsigned int arch_spin_try
*/
#define __raw_write_can_lock(rw) (!(rw)->lock)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_r
/* Note the use of sub, not subu which will make the kernel die with an
overflow exception if we ever try to unlock an rwlock that is already
unlocked or is being held by a writer. */
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -335,7 +335,7 @@ static inline void __raw_read_unlock(raw
}
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -377,7 +377,7 @@ static inline void __raw_write_lock(raw_
smp_llsc_mb();
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
@@ -389,7 +389,7 @@ static inline void __raw_write_unlock(ra
: "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
@@ -433,7 +433,7 @@ static inline int __raw_read_trylock(raw
return ret;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
Index: linux-2.6-tip/arch/mips/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/mips/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/mips/include/asm/spinlock_types.h
@@ -18,8 +18,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
Index: linux-2.6-tip/arch/parisc/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/parisc/include/asm/spinlock.h
+++ linux-2.6-tip/arch/parisc/include/asm/spinlock.h
@@ -69,7 +69,7 @@ static inline int arch_spin_trylock(arch
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
+static __inline__ void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
@@ -81,7 +81,7 @@ static __inline__ void __raw_read_lock(
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
+static __inline__ void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
@@ -93,7 +93,7 @@ static __inline__ void __raw_read_unloc
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
+static __inline__ int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
@@ -119,7 +119,7 @@ static __inline__ int __raw_read_trylock
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static __inline__ void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
@@ -141,7 +141,7 @@ retry:
local_irq_restore(flags);
}
-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+static __inline__ void __raw_write_unlock(arch_rwlock_t *rw)
{
rw->counter = 0;
arch_spin_unlock(&rw->lock);
@@ -149,7 +149,7 @@ static __inline__ void __raw_write_unloc
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
+static __inline__ int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
int result = 0;
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_tryloc
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
+static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw)
{
return rw->counter >= 0;
}
@@ -182,7 +182,7 @@ static __inline__ int __raw_read_can_loc
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
+static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw)
{
return !rw->counter;
}
Index: linux-2.6-tip/arch/parisc/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/parisc/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/parisc/include/asm/spinlock_types.h
@@ -14,8 +14,8 @@ typedef struct {
typedef struct {
arch_spinlock_t lock;
volatile int counter;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
#endif
Index: linux-2.6-tip/arch/powerpc/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/include/asm/spinlock.h
+++ linux-2.6-tip/arch/powerpc/include/asm/spinlock.h
@@ -97,7 +97,7 @@ static inline int arch_spin_trylock(arch
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(raw_rwlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x) barrier()
#define __rw_yield(x) barrier()
@@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_s
* This returns the old value in the lock + 1,
* so we got a read lock if the return value is > 0.
*/
-static inline long arch_read_trylock(raw_rwlock_t *rw)
+static inline long arch_read_trylock(arch_rwlock_t *rw)
{
long tmp;
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw
* This returns the old value in the lock,
* so we got the write lock if the return value is 0.
*/
-static inline long arch_write_trylock(raw_rwlock_t *rw)
+static inline long arch_write_trylock(arch_rwlock_t *rw)
{
long tmp, token;
@@ -225,7 +225,7 @@ static inline long arch_write_trylock(ra
return tmp;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
while (1) {
if (likely(arch_read_trylock(rw) > 0))
@@ -239,7 +239,7 @@ static inline void __raw_read_lock(raw_r
}
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
while (1) {
if (likely(arch_write_trylock(rw) == 0))
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_
}
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
return arch_read_trylock(rw) > 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
return arch_write_trylock(rw) == 0;
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
long tmp;
@@ -280,7 +280,7 @@ static inline void __raw_read_unlock(raw
: "cr0", "xer", "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__("# write_unlock\n\t"
LWSYNC_ON_SMP: : :"memory");
Index: linux-2.6-tip/arch/powerpc/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/powerpc/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile signed int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
Index: linux-2.6-tip/arch/powerpc/lib/locks.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/lib/locks.c
+++ linux-2.6-tip/arch/powerpc/lib/locks.c
@@ -55,7 +55,7 @@ void __spin_yield(arch_spinlock_t *lock)
* This turns out to be the same for read and write locks, since
* we only know the holder if it is write-locked.
*/
-void __rw_yield(raw_rwlock_t *rw)
+void __rw_yield(arch_rwlock_t *rw)
{
int lock_value;
unsigned int holder_cpu, yield_count;
Index: linux-2.6-tip/arch/s390/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/s390/include/asm/spinlock.h
+++ linux-2.6-tip/arch/s390/include/asm/spinlock.h
@@ -121,14 +121,14 @@ static inline void arch_spin_unlock(arch
*/
#define __raw_write_can_lock(x) ((x)->lock == 0)
-extern void _raw_read_lock_wait(raw_rwlock_t *lp);
-extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_r
_raw_read_lock_wait(rw);
}
-static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags
_raw_read_lock_wait_flags(rw, flags);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned int old, cmp;
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw
} while (cmp != old);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait(rw);
}
-static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait_flags(rw, flags);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -181,7 +181,7 @@ static inline int __raw_read_trylock(raw
return _raw_read_trylock_retry(rw);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
return 1;
Index: linux-2.6-tip/arch/s390/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/s390/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/s390/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
Index: linux-2.6-tip/arch/s390/lib/spinlock.c
===================================================================
--- linux-2.6-tip.orig/arch/s390/lib/spinlock.c
+++ linux-2.6-tip/arch/s390/lib/spinlock.c
@@ -105,7 +105,7 @@ void arch_spin_relax(arch_spinlock_t *lo
}
EXPORT_SYMBOL(arch_spin_relax);
-void _raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_read_lock_wait(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
@@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *r
}
EXPORT_SYMBOL(_raw_read_lock_wait);
-void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
int count = spin_retry;
@@ -145,7 +145,7 @@ void _raw_read_lock_wait_flags(raw_rwloc
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
-int _raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
@@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
-void _raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(arch_rwlock_t *rw)
{
int count = spin_retry;
@@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *
}
EXPORT_SYMBOL(_raw_write_lock_wait);
-void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
int count = spin_retry;
@@ -197,7 +197,7 @@ void _raw_write_lock_wait_flags(raw_rwlo
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
-int _raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
int count = spin_retry;
Index: linux-2.6-tip/arch/sh/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/sh/include/asm/spinlock.h
+++ linux-2.6-tip/arch/sh/include/asm/spinlock.h
@@ -108,7 +108,7 @@ static inline int arch_spin_trylock(arch
*/
#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -126,7 +126,7 @@ static inline void __raw_read_lock(raw_r
);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -142,7 +142,7 @@ static inline void __raw_read_unlock(raw
);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -160,7 +160,7 @@ static inline void __raw_write_lock(raw_
);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__ (
"mov.l %1, @%0 ! __raw_write_unlock \n\t"
@@ -170,7 +170,7 @@ static inline void __raw_write_unlock(ra
);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
@@ -193,7 +193,7 @@ static inline int __raw_read_trylock(raw
return (oldval > 0);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
Index: linux-2.6-tip/arch/sh/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/sh/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/sh/include/asm/spinlock_types.h
@@ -13,9 +13,9 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
#define RW_LOCK_BIAS 0x01000000
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_32.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_32.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_32.h
@@ -65,7 +65,7 @@ static inline void arch_spin_unlock(arch
* Sort of like atomic_t's on Sparc, but even more clever.
*
* ------------------------------------
- * | 24-bit counter | wlock | raw_rwlock_t
+ * | 24-bit counter | wlock | arch_rwlock_t
* ------------------------------------
* 31 8 7 0
*
@@ -76,9 +76,9 @@ static inline void arch_spin_unlock(arch
*
* Unfortunately this scheme limits us to ~16,000,000 cpus.
*/
-static inline void arch_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -96,9 +96,9 @@ do { unsigned long flags; \
local_irq_restore(flags); \
} while(0)
-static inline void arch_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -116,9 +116,9 @@ do { unsigned long flags; \
local_irq_restore(flags); \
} while(0)
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_
*(volatile __u32 *)&lp->lock = ~0U;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned int val;
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(ra
return (val == 0);
}
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
register int res asm("o0");
lp = rw;
__asm__ __volatile__(
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_64.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_64.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_64.h
@@ -92,7 +92,7 @@ static inline void arch_spin_lock_flags(
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-static void inline arch_read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rw
: "memory");
}
-static int inline arch_read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(arch_rwlock_t *lock)
{
int tmp1, tmp2;
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_
return tmp1;
}
-static void inline arch_read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_
: "memory");
}
-static void inline arch_write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_r
: "memory");
}
-static void inline arch_write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw
: "memory");
}
-static int inline arch_write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
Index: linux-2.6-tip/arch/x86/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/spinlock.h
+++ linux-2.6-tip/arch/x86/include/asm/spinlock.h
@@ -232,7 +232,7 @@ static inline void arch_spin_unlock_wait
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int __raw_read_can_lock(arch_rwlock_t *lock)
{
return (int)(lock)->lock > 0;
}
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(ra
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int __raw_write_can_lock(arch_rwlock_t *lock)
{
return (lock)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
"jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_r
::LOCK_PTR_REG (rw) : "memory");
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
"jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_
::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int __raw_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw
return 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int __raw_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -284,12 +284,12 @@ static inline int __raw_write_trylock(ra
return 0;
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "addl %1, %0"
: "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
Index: linux-2.6-tip/arch/x86/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/x86/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct arch_spinlock {
typedef struct {
unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_X86_SPINLOCK_TYPES_H */
Index: linux-2.6-tip/include/linux/rwlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/rwlock.h
+++ linux-2.6-tip/include/linux/rwlock.h
@@ -14,7 +14,7 @@
* Released under the General Public License (GPL).
*/
-extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
+extern int __lockfunc generic__raw_read_trylock(arch_rwlock_t *lock);
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __rwlock_init(rwlock_t *lock, const char *name,
Index: linux-2.6-tip/include/linux/rwlock_types.h
===================================================================
--- linux-2.6-tip.orig/include/linux/rwlock_types.h
+++ linux-2.6-tip/include/linux/rwlock_types.h
@@ -9,7 +9,7 @@
* Released under the General Public License (GPL).
*/
typedef struct {
- raw_rwlock_t raw_lock;
+ arch_rwlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
@@ -32,14 +32,14 @@ typedef struct {
#ifdef CONFIG_DEBUG_SPINLOCK
#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
.magic = RWLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \
RW_DEP_MAP_INIT(lockname) }
#else
#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
RW_DEP_MAP_INIT(lockname) }
#endif
Index: linux-2.6-tip/include/linux/spinlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock.h
+++ linux-2.6-tip/include/linux/spinlock.h
@@ -8,7 +8,7 @@
*
* on SMP builds:
*
- * asm/spinlock_types.h: contains the arch_spinlock_t/raw_rwlock_t and the
+ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
@@ -75,7 +75,7 @@
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
- * Pull the arch_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and arch_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
Index: linux-2.6-tip/include/linux/spinlock_types_up.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_types_up.h
+++ linux-2.6-tip/include/linux/spinlock_types_up.h
@@ -30,8 +30,8 @@ typedef struct { } arch_spinlock_t;
typedef struct {
/* no debug version on UP */
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { }
+#define __ARCH_RW_LOCK_UNLOCKED { }
#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
Index: linux-2.6-tip/lib/spinlock_debug.c
===================================================================
--- linux-2.6-tip.orig/lib/spinlock_debug.c
+++ linux-2.6-tip/lib/spinlock_debug.c
@@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
- lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+ lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
lock->magic = RWLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 07/23] locking: Convert raw_rwlock to arch_rwlock
2009-12-06 18:02 ` [patch 07/23] locking: Convert raw_rwlock to arch_rwlock Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
0 siblings, 0 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-convert-rwlock-arch.patch --]
[-- Type: text/plain, Size: 43220 bytes --]
Not strictly necessary for -rt, as -rt does not have non-sleeping
rwlocks, but it is odd not to have a consistent naming convention.
No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/alpha/include/asm/spinlock.h | 16 ++++++++--------
arch/alpha/include/asm/spinlock_types.h | 4 ++--
arch/arm/include/asm/spinlock.h | 12 ++++++------
arch/arm/include/asm/spinlock_types.h | 4 ++--
arch/blackfin/include/asm/spinlock.h | 16 ++++++++--------
arch/blackfin/include/asm/spinlock_types.h | 4 ++--
arch/cris/include/arch-v32/arch/spinlock.h | 16 ++++++++--------
arch/ia64/include/asm/spinlock.h | 16 ++++++++--------
arch/ia64/include/asm/spinlock_types.h | 4 ++--
arch/m32r/include/asm/spinlock.h | 12 ++++++------
arch/m32r/include/asm/spinlock_types.h | 4 ++--
arch/mips/include/asm/spinlock.h | 12 ++++++------
arch/mips/include/asm/spinlock_types.h | 4 ++--
arch/parisc/include/asm/spinlock.h | 16 ++++++++--------
arch/parisc/include/asm/spinlock_types.h | 4 ++--
arch/powerpc/include/asm/spinlock.h | 18 +++++++++---------
arch/powerpc/include/asm/spinlock_types.h | 4 ++--
arch/powerpc/lib/locks.c | 2 +-
arch/s390/include/asm/spinlock.h | 28 ++++++++++++++--------------
arch/s390/include/asm/spinlock_types.h | 4 ++--
arch/s390/lib/spinlock.c | 12 ++++++------
arch/sh/include/asm/spinlock.h | 12 ++++++------
arch/sh/include/asm/spinlock_types.h | 4 ++--
arch/sparc/include/asm/spinlock_32.h | 20 ++++++++++----------
arch/sparc/include/asm/spinlock_64.h | 12 ++++++------
arch/sparc/include/asm/spinlock_types.h | 4 ++--
arch/x86/include/asm/spinlock.h | 16 ++++++++--------
arch/x86/include/asm/spinlock_types.h | 4 ++--
include/linux/rwlock.h | 2 +-
include/linux/rwlock_types.h | 6 +++---
include/linux/spinlock.h | 4 ++--
include/linux/spinlock_types_up.h | 4 ++--
lib/spinlock_debug.c | 2 +-
33 files changed, 151 insertions(+), 151 deletions(-)
Index: linux-2.6-tip/arch/alpha/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/alpha/include/asm/spinlock.h
+++ linux-2.6-tip/arch/alpha/include/asm/spinlock.h
@@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch
/***********************************************************/
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int __raw_read_can_lock(arch_rwlock_t *lock)
{
return (lock->lock & 1) == 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int __raw_write_can_lock(arch_rwlock_t *lock)
{
return lock->lock == 0;
}
-static inline void __raw_read_lock(raw_rwlock_t *lock)
+static inline void __raw_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_r
: "m" (*lock) : "memory");
}
-static inline void __raw_write_lock(raw_rwlock_t *lock)
+static inline void __raw_write_lock(arch_rwlock_t *lock)
{
long regx;
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_
: "m" (*lock) : "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t * lock)
+static inline int __raw_read_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw
return success;
}
-static inline int __raw_write_trylock(raw_rwlock_t * lock)
+static inline int __raw_write_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(ra
return success;
}
-static inline void __raw_read_unlock(raw_rwlock_t * lock)
+static inline void __raw_read_unlock(arch_rwlock_t * lock)
{
long regx;
__asm__ __volatile__(
@@ -160,7 +160,7 @@ static inline void __raw_read_unlock(raw
: "m" (*lock) : "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t * lock)
+static inline void __raw_write_unlock(arch_rwlock_t * lock)
{
mb();
lock->lock = 0;
Index: linux-2.6-tip/arch/alpha/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/alpha/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/alpha/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
Index: linux-2.6-tip/arch/arm/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/arm/include/asm/spinlock.h
+++ linux-2.6-tip/arch/arm/include/asm/spinlock.h
@@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch
* just write zero since the lock is exclusively held.
*/
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_
smp_mb();
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(ra
}
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(ra
* currently active. However, we know we won't have any write
* locks.
*/
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_r
smp_mb();
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw
: "cc");
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2 = 1;
Index: linux-2.6-tip/arch/arm/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/arm/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/arm/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
Index: linux-2.6-tip/arch/blackfin/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/include/asm/spinlock.h
+++ linux-2.6-tip/arch/blackfin/include/asm/spinlock.h
@@ -52,42 +52,42 @@ static inline void arch_spin_unlock_wait
cpu_relax();
}
-static inline int __raw_read_can_lock(raw_rwlock_t *rw)
+static inline int __raw_read_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) > 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *rw)
+static inline int __raw_write_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
__raw_read_lock_asm(&rw->lock);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
return __raw_read_trylock_asm(&rw->lock);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
__raw_read_unlock_asm(&rw->lock);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
__raw_write_lock_asm(&rw->lock);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
return __raw_write_trylock_asm(&rw->lock);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
__raw_write_unlock_asm(&rw->lock);
}
Index: linux-2.6-tip/arch/blackfin/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/blackfin/include/asm/spinlock_types.h
@@ -21,8 +21,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif
Index: linux-2.6-tip/arch/cris/include/arch-v32/arch/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/cris/include/arch-v32/arch/spinlock.h
+++ linux-2.6-tip/arch/cris/include/arch-v32/arch/spinlock.h
@@ -56,17 +56,17 @@ arch_spin_lock_flags(arch_spinlock_t *lo
*
*/
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int __raw_read_can_lock(arch_rwlock_t *x)
{
return (int)(x)->lock > 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int __raw_write_can_lock(arch_rwlock_t *x)
{
return (x)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock == 0);
@@ -74,7 +74,7 @@ static inline void __raw_read_lock(raw_
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
@@ -82,14 +82,14 @@ static inline void __raw_write_lock(raw
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
rw->lock++;
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
@@ -97,7 +97,7 @@ static inline void __raw_write_unlock(r
arch_spin_unlock(&rw->slock);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
@@ -109,7 +109,7 @@ static inline int __raw_read_trylock(ra
return ret;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
Index: linux-2.6-tip/arch/ia64/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/ia64/include/asm/spinlock.h
+++ linux-2.6-tip/arch/ia64/include/asm/spinlock.h
@@ -146,7 +146,7 @@ static inline void arch_spin_unlock_wait
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1,%2\n"
@@ -177,7 +177,7 @@ __raw_read_lock_flags(raw_rwlock_t *lock
#define __raw_read_lock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
\
while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -190,14 +190,14 @@ do { \
#define __raw_read_unlock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1, %2\n"
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *loc
(result == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void __raw_write_unlock(arch_rwlock_t *x)
{
u8 *y = (u8 *)x;
barrier();
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(ra
(ia64_val == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void __raw_write_unlock(arch_rwlock_t *x)
{
barrier();
x->write_lock = 0;
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(ra
#endif /* !ASM_SUPPORTED */
-static inline int __raw_read_trylock(raw_rwlock_t *x)
+static inline int __raw_read_trylock(arch_rwlock_t *x)
{
union {
- raw_rwlock_t lock;
+ arch_rwlock_t lock;
__u32 word;
} old, new;
old.lock = new.lock = *x;
Index: linux-2.6-tip/arch/ia64/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/ia64/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/ia64/include/asm/spinlock_types.h
@@ -14,8 +14,8 @@ typedef struct {
typedef struct {
volatile unsigned int read_counter : 31;
volatile unsigned int write_lock : 1;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 }
#endif
Index: linux-2.6-tip/arch/m32r/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/m32r/include/asm/spinlock.h
+++ linux-2.6-tip/arch/m32r/include/asm/spinlock.h
@@ -148,7 +148,7 @@ static inline void arch_spin_unlock(arch
*/
#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_r
);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_
);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw
);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(ra
);
}
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int __raw_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t*)lock;
if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw
return 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int __raw_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
Index: linux-2.6-tip/arch/m32r/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/m32r/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/m32r/include/asm/spinlock_types.h
@@ -13,11 +13,11 @@ typedef struct {
typedef struct {
volatile int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_M32R_SPINLOCK_TYPES_H */
Index: linux-2.6-tip/arch/mips/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/mips/include/asm/spinlock.h
+++ linux-2.6-tip/arch/mips/include/asm/spinlock.h
@@ -256,7 +256,7 @@ static inline unsigned int arch_spin_try
*/
#define __raw_write_can_lock(rw) (!(rw)->lock)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_r
/* Note the use of sub, not subu which will make the kernel die with an
overflow exception if we ever try to unlock an rwlock that is already
unlocked or is being held by a writer. */
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -335,7 +335,7 @@ static inline void __raw_read_unlock(raw
}
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -377,7 +377,7 @@ static inline void __raw_write_lock(raw_
smp_llsc_mb();
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
@@ -389,7 +389,7 @@ static inline void __raw_write_unlock(ra
: "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
@@ -433,7 +433,7 @@ static inline int __raw_read_trylock(raw
return ret;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
Index: linux-2.6-tip/arch/mips/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/mips/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/mips/include/asm/spinlock_types.h
@@ -18,8 +18,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
Index: linux-2.6-tip/arch/parisc/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/parisc/include/asm/spinlock.h
+++ linux-2.6-tip/arch/parisc/include/asm/spinlock.h
@@ -69,7 +69,7 @@ static inline int arch_spin_trylock(arch
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
+static __inline__ void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
@@ -81,7 +81,7 @@ static __inline__ void __raw_read_lock(
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
+static __inline__ void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
@@ -93,7 +93,7 @@ static __inline__ void __raw_read_unloc
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
+static __inline__ int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
@@ -119,7 +119,7 @@ static __inline__ int __raw_read_trylock
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static __inline__ void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
@@ -141,7 +141,7 @@ retry:
local_irq_restore(flags);
}
-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+static __inline__ void __raw_write_unlock(arch_rwlock_t *rw)
{
rw->counter = 0;
arch_spin_unlock(&rw->lock);
@@ -149,7 +149,7 @@ static __inline__ void __raw_write_unloc
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
+static __inline__ int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
int result = 0;
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_tryloc
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
+static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw)
{
return rw->counter >= 0;
}
@@ -182,7 +182,7 @@ static __inline__ int __raw_read_can_loc
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
+static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw)
{
return !rw->counter;
}
Index: linux-2.6-tip/arch/parisc/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/parisc/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/parisc/include/asm/spinlock_types.h
@@ -14,8 +14,8 @@ typedef struct {
typedef struct {
arch_spinlock_t lock;
volatile int counter;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
#endif
Index: linux-2.6-tip/arch/powerpc/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/include/asm/spinlock.h
+++ linux-2.6-tip/arch/powerpc/include/asm/spinlock.h
@@ -97,7 +97,7 @@ static inline int arch_spin_trylock(arch
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(raw_rwlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x) barrier()
#define __rw_yield(x) barrier()
@@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_s
* This returns the old value in the lock + 1,
* so we got a read lock if the return value is > 0.
*/
-static inline long arch_read_trylock(raw_rwlock_t *rw)
+static inline long arch_read_trylock(arch_rwlock_t *rw)
{
long tmp;
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw
* This returns the old value in the lock,
* so we got the write lock if the return value is 0.
*/
-static inline long arch_write_trylock(raw_rwlock_t *rw)
+static inline long arch_write_trylock(arch_rwlock_t *rw)
{
long tmp, token;
@@ -225,7 +225,7 @@ static inline long arch_write_trylock(ra
return tmp;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
while (1) {
if (likely(arch_read_trylock(rw) > 0))
@@ -239,7 +239,7 @@ static inline void __raw_read_lock(raw_r
}
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
while (1) {
if (likely(arch_write_trylock(rw) == 0))
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_
}
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
return arch_read_trylock(rw) > 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
return arch_write_trylock(rw) == 0;
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
long tmp;
@@ -280,7 +280,7 @@ static inline void __raw_read_unlock(raw
: "cr0", "xer", "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__("# write_unlock\n\t"
LWSYNC_ON_SMP: : :"memory");
Index: linux-2.6-tip/arch/powerpc/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/powerpc/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile signed int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
Index: linux-2.6-tip/arch/powerpc/lib/locks.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/lib/locks.c
+++ linux-2.6-tip/arch/powerpc/lib/locks.c
@@ -55,7 +55,7 @@ void __spin_yield(arch_spinlock_t *lock)
* This turns out to be the same for read and write locks, since
* we only know the holder if it is write-locked.
*/
-void __rw_yield(raw_rwlock_t *rw)
+void __rw_yield(arch_rwlock_t *rw)
{
int lock_value;
unsigned int holder_cpu, yield_count;
Index: linux-2.6-tip/arch/s390/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/s390/include/asm/spinlock.h
+++ linux-2.6-tip/arch/s390/include/asm/spinlock.h
@@ -121,14 +121,14 @@ static inline void arch_spin_unlock(arch
*/
#define __raw_write_can_lock(x) ((x)->lock == 0)
-extern void _raw_read_lock_wait(raw_rwlock_t *lp);
-extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_r
_raw_read_lock_wait(rw);
}
-static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags
_raw_read_lock_wait_flags(rw, flags);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned int old, cmp;
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw
} while (cmp != old);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait(rw);
}
-static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait_flags(rw, flags);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -181,7 +181,7 @@ static inline int __raw_read_trylock(raw
return _raw_read_trylock_retry(rw);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
return 1;
Index: linux-2.6-tip/arch/s390/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/s390/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/s390/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
Index: linux-2.6-tip/arch/s390/lib/spinlock.c
===================================================================
--- linux-2.6-tip.orig/arch/s390/lib/spinlock.c
+++ linux-2.6-tip/arch/s390/lib/spinlock.c
@@ -105,7 +105,7 @@ void arch_spin_relax(arch_spinlock_t *lo
}
EXPORT_SYMBOL(arch_spin_relax);
-void _raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_read_lock_wait(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
@@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *r
}
EXPORT_SYMBOL(_raw_read_lock_wait);
-void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
int count = spin_retry;
@@ -145,7 +145,7 @@ void _raw_read_lock_wait_flags(raw_rwloc
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
-int _raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
@@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
-void _raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(arch_rwlock_t *rw)
{
int count = spin_retry;
@@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *
}
EXPORT_SYMBOL(_raw_write_lock_wait);
-void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
int count = spin_retry;
@@ -197,7 +197,7 @@ void _raw_write_lock_wait_flags(raw_rwlo
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
-int _raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
int count = spin_retry;
Index: linux-2.6-tip/arch/sh/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/sh/include/asm/spinlock.h
+++ linux-2.6-tip/arch/sh/include/asm/spinlock.h
@@ -108,7 +108,7 @@ static inline int arch_spin_trylock(arch
*/
#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -126,7 +126,7 @@ static inline void __raw_read_lock(raw_r
);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -142,7 +142,7 @@ static inline void __raw_read_unlock(raw
);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -160,7 +160,7 @@ static inline void __raw_write_lock(raw_
);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__ (
"mov.l %1, @%0 ! __raw_write_unlock \n\t"
@@ -170,7 +170,7 @@ static inline void __raw_write_unlock(ra
);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
@@ -193,7 +193,7 @@ static inline int __raw_read_trylock(raw
return (oldval > 0);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
Index: linux-2.6-tip/arch/sh/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/sh/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/sh/include/asm/spinlock_types.h
@@ -13,9 +13,9 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
#define RW_LOCK_BIAS 0x01000000
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_32.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_32.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_32.h
@@ -65,7 +65,7 @@ static inline void arch_spin_unlock(arch
* Sort of like atomic_t's on Sparc, but even more clever.
*
* ------------------------------------
- * | 24-bit counter | wlock | raw_rwlock_t
+ * | 24-bit counter | wlock | arch_rwlock_t
* ------------------------------------
* 31 8 7 0
*
@@ -76,9 +76,9 @@ static inline void arch_spin_unlock(arch
*
* Unfortunately this scheme limits us to ~16,000,000 cpus.
*/
-static inline void arch_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -96,9 +96,9 @@ do { unsigned long flags; \
local_irq_restore(flags); \
} while(0)
-static inline void arch_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -116,9 +116,9 @@ do { unsigned long flags; \
local_irq_restore(flags); \
} while(0)
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_
*(volatile __u32 *)&lp->lock = ~0U;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned int val;
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(ra
return (val == 0);
}
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
register int res asm("o0");
lp = rw;
__asm__ __volatile__(
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_64.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_64.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_64.h
@@ -92,7 +92,7 @@ static inline void arch_spin_lock_flags(
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-static void inline arch_read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rw
: "memory");
}
-static int inline arch_read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(arch_rwlock_t *lock)
{
int tmp1, tmp2;
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_
return tmp1;
}
-static void inline arch_read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_
: "memory");
}
-static void inline arch_write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_r
: "memory");
}
-static void inline arch_write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw
: "memory");
}
-static int inline arch_write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
Index: linux-2.6-tip/arch/x86/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/spinlock.h
+++ linux-2.6-tip/arch/x86/include/asm/spinlock.h
@@ -232,7 +232,7 @@ static inline void arch_spin_unlock_wait
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int __raw_read_can_lock(arch_rwlock_t *lock)
{
return (int)(lock)->lock > 0;
}
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(ra
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int __raw_write_can_lock(arch_rwlock_t *lock)
{
return (lock)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
"jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_r
::LOCK_PTR_REG (rw) : "memory");
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
"jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_
::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int __raw_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw
return 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int __raw_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -284,12 +284,12 @@ static inline int __raw_write_trylock(ra
return 0;
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "addl %1, %0"
: "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
Index: linux-2.6-tip/arch/x86/include/asm/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/spinlock_types.h
+++ linux-2.6-tip/arch/x86/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct arch_spinlock {
typedef struct {
unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_X86_SPINLOCK_TYPES_H */
Index: linux-2.6-tip/include/linux/rwlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/rwlock.h
+++ linux-2.6-tip/include/linux/rwlock.h
@@ -14,7 +14,7 @@
* Released under the General Public License (GPL).
*/
-extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
+extern int __lockfunc generic__raw_read_trylock(arch_rwlock_t *lock);
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __rwlock_init(rwlock_t *lock, const char *name,
Index: linux-2.6-tip/include/linux/rwlock_types.h
===================================================================
--- linux-2.6-tip.orig/include/linux/rwlock_types.h
+++ linux-2.6-tip/include/linux/rwlock_types.h
@@ -9,7 +9,7 @@
* Released under the General Public License (GPL).
*/
typedef struct {
- raw_rwlock_t raw_lock;
+ arch_rwlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
@@ -32,14 +32,14 @@ typedef struct {
#ifdef CONFIG_DEBUG_SPINLOCK
#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
.magic = RWLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \
RW_DEP_MAP_INIT(lockname) }
#else
#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
RW_DEP_MAP_INIT(lockname) }
#endif
Index: linux-2.6-tip/include/linux/spinlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock.h
+++ linux-2.6-tip/include/linux/spinlock.h
@@ -8,7 +8,7 @@
*
* on SMP builds:
*
- * asm/spinlock_types.h: contains the arch_spinlock_t/raw_rwlock_t and the
+ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
@@ -75,7 +75,7 @@
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
- * Pull the arch_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and arch_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
Index: linux-2.6-tip/include/linux/spinlock_types_up.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_types_up.h
+++ linux-2.6-tip/include/linux/spinlock_types_up.h
@@ -30,8 +30,8 @@ typedef struct { } arch_spinlock_t;
typedef struct {
/* no debug version on UP */
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { }
+#define __ARCH_RW_LOCK_UNLOCKED { }
#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
Index: linux-2.6-tip/lib/spinlock_debug.c
===================================================================
--- linux-2.6-tip.orig/lib/spinlock_debug.c
+++ linux-2.6-tip/lib/spinlock_debug.c
@@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
- lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+ lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
lock->magic = RWLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 08/23] locking: Convert raw_rwlock functions to arch_rwlock
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (6 preceding siblings ...)
2009-12-06 18:02 ` [patch 07/23] locking: Convert raw_rwlock to arch_rwlock Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` [patch 09/23] locking: Implement new raw_spinlock Thomas Gleixner
` (14 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-convert-rwlock-arch-functions.patch --]
[-- Type: text/plain, Size: 50428 bytes --]
Name space cleanup for rwlock functions. No functional change.
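For illustration only (this sketch is not part of the patch): the diff below renames the per-architecture entry points __raw_{read,write}_lock(), _trylock(), _unlock(), _can_lock() and the *_lock_flags() variants to arch_{read,write}_*(), matching the arch_spinlock_t convention. The biased-counter scheme that the x86 hunks implement in assembly can be sketched in plain C11 roughly as follows; arch_rwlock_t, RW_LOCK_BIAS and the helpers here are stand-ins for illustration, not kernel code:

/* Hypothetical userspace sketch of the RW_LOCK_BIAS rwlock scheme,
 * using the new arch_* names.  A reader takes one unit of the counter,
 * a writer takes the whole bias. */
#include <stdatomic.h>

#define RW_LOCK_BIAS 0x01000000

typedef struct { atomic_int lock; } arch_rwlock_t;   /* starts at RW_LOCK_BIAS */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	/* Grab a reader slot; back out if a writer holds the bias. */
	if (atomic_fetch_sub(&rw->lock, 1) > 0)
		return 1;
	atomic_fetch_add(&rw->lock, 1);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	/* Claim the full bias; back out if any reader or writer is present. */
	if (atomic_fetch_sub(&rw->lock, RW_LOCK_BIAS) == RW_LOCK_BIAS)
		return 1;
	atomic_fetch_add(&rw->lock, RW_LOCK_BIAS);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	atomic_fetch_add(&rw->lock, 1);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	atomic_fetch_add(&rw->lock, RW_LOCK_BIAS);
}

An unlocked lock sits at RW_LOCK_BIAS, which is what the __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } initializers on x86, m32r and sh express; the renames themselves change no behaviour.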
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/alpha/include/asm/spinlock.h | 20 ++++++-------
arch/arm/include/asm/spinlock.h | 20 ++++++-------
arch/blackfin/include/asm/spinlock.h | 40 +++++++++++++--------------
arch/cris/include/arch-v32/arch/spinlock.h | 16 +++++------
arch/ia64/include/asm/spinlock.h | 32 +++++++++++-----------
arch/m32r/include/asm/spinlock.h | 20 ++++++-------
arch/mips/include/asm/spinlock.h | 42 ++++++++++++++---------------
arch/parisc/include/asm/spinlock.h | 20 ++++++-------
arch/powerpc/include/asm/spinlock.h | 32 +++++++++++-----------
arch/s390/include/asm/spinlock.h | 20 ++++++-------
arch/s390/lib/spinlock.c | 12 ++++----
arch/sh/include/asm/spinlock.h | 32 +++++++++++-----------
arch/sparc/include/asm/spinlock_32.h | 32 +++++++++++-----------
arch/sparc/include/asm/spinlock_64.h | 20 ++++++-------
arch/x86/include/asm/spinlock.h | 20 ++++++-------
include/linux/rwlock.h | 22 +++++++--------
include/linux/spinlock_up.h | 16 +++++------
lib/spinlock_debug.c | 16 +++++------
18 files changed, 216 insertions(+), 216 deletions(-)
Index: linux-2.6-tip/arch/alpha/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/alpha/include/asm/spinlock.h
+++ linux-2.6-tip/arch/alpha/include/asm/spinlock.h
@@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch
/***********************************************************/
-static inline int __raw_read_can_lock(arch_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
return (lock->lock & 1) == 0;
}
-static inline int __raw_write_can_lock(arch_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
return lock->lock == 0;
}
-static inline void __raw_read_lock(arch_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(arch_
: "m" (*lock) : "memory");
}
-static inline void __raw_write_lock(arch_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
{
long regx;
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(arch
: "m" (*lock) : "memory");
}
-static inline int __raw_read_trylock(arch_rwlock_t * lock)
+static inline int arch_read_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(arc
return success;
}
-static inline int __raw_write_trylock(arch_rwlock_t * lock)
+static inline int arch_write_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(ar
return success;
}
-static inline void __raw_read_unlock(arch_rwlock_t * lock)
+static inline void arch_read_unlock(arch_rwlock_t * lock)
{
long regx;
__asm__ __volatile__(
@@ -160,14 +160,14 @@ static inline void __raw_read_unlock(arc
: "m" (*lock) : "memory");
}
-static inline void __raw_write_unlock(arch_rwlock_t * lock)
+static inline void arch_write_unlock(arch_rwlock_t * lock)
{
mb();
lock->lock = 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/arm/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/arm/include/asm/spinlock.h
+++ linux-2.6-tip/arch/arm/include/asm/spinlock.h
@@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch
* just write zero since the lock is exclusively held.
*/
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(arch
smp_mb();
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(ar
}
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
@@ -142,7 +142,7 @@ static inline void __raw_write_unlock(ar
}
/* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
/*
* Read locks are a bit more hairy:
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(ar
* currently active. However, we know we won't have any write
* locks.
*/
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(arch_
smp_mb();
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(arc
: "cc");
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2 = 1;
@@ -215,10 +215,10 @@ static inline int __raw_read_trylock(arc
}
/* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x) ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/blackfin/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/include/asm/spinlock.h
+++ linux-2.6-tip/arch/blackfin/include/asm/spinlock.h
@@ -17,12 +17,12 @@ asmlinkage int __raw_spin_is_locked_asm(
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_read_lock_asm(volatile int *ptr);
-asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_write_lock_asm(volatile int *ptr);
-asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
+asmlinkage void arch_read_lock_asm(volatile int *ptr);
+asmlinkage int arch_read_trylock_asm(volatile int *ptr);
+asmlinkage void arch_read_unlock_asm(volatile int *ptr);
+asmlinkage void arch_write_lock_asm(volatile int *ptr);
+asmlinkage int arch_write_trylock_asm(volatile int *ptr);
+asmlinkage void arch_write_unlock_asm(volatile int *ptr);
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
@@ -52,44 +52,44 @@ static inline void arch_spin_unlock_wait
cpu_relax();
}
-static inline int __raw_read_can_lock(arch_rwlock_t *rw)
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) > 0;
}
-static inline int __raw_write_can_lock(arch_rwlock_t *rw)
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
- __raw_read_lock_asm(&rw->lock);
+ arch_read_lock_asm(&rw->lock);
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- return __raw_read_trylock_asm(&rw->lock);
+ return arch_read_trylock_asm(&rw->lock);
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- __raw_read_unlock_asm(&rw->lock);
+ arch_read_unlock_asm(&rw->lock);
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
- __raw_write_lock_asm(&rw->lock);
+ arch_write_lock_asm(&rw->lock);
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- return __raw_write_trylock_asm(&rw->lock);
+ return arch_write_trylock_asm(&rw->lock);
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- __raw_write_unlock_asm(&rw->lock);
+ arch_write_unlock_asm(&rw->lock);
}
#define arch_spin_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/cris/include/arch-v32/arch/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/cris/include/arch-v32/arch/spinlock.h
+++ linux-2.6-tip/arch/cris/include/arch-v32/arch/spinlock.h
@@ -56,17 +56,17 @@ arch_spin_lock_flags(arch_spinlock_t *lo
*
*/
-static inline int __raw_read_can_lock(arch_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
{
return (int)(x)->lock > 0;
}
-static inline int __raw_write_can_lock(arch_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
{
return (x)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock == 0);
@@ -74,7 +74,7 @@ static inline void __raw_read_lock(arch
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
@@ -82,14 +82,14 @@ static inline void __raw_write_lock(arc
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
rw->lock++;
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
@@ -97,7 +97,7 @@ static inline void __raw_write_unlock(a
arch_spin_unlock(&rw->slock);
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
@@ -109,7 +109,7 @@ static inline int __raw_read_trylock(ar
return ret;
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
Index: linux-2.6-tip/arch/ia64/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/ia64/include/asm/spinlock.h
+++ linux-2.6-tip/arch/ia64/include/asm/spinlock.h
@@ -140,13 +140,13 @@ static inline void arch_spin_unlock_wait
__ticket_spin_unlock_wait(lock);
}
-#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1,%2\n"
@@ -169,13 +169,13 @@ __raw_read_lock_flags(arch_rwlock_t *loc
: "p6", "p7", "r2", "memory");
}
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
#else /* !ASM_SUPPORTED */
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-#define __raw_read_lock(rw) \
+#define arch_read_lock(rw) \
do { \
arch_rwlock_t *__read_lock_ptr = (rw); \
\
@@ -188,7 +188,7 @@ do { \
#endif /* !ASM_SUPPORTED */
-#define __raw_read_unlock(rw) \
+#define arch_read_unlock(rw) \
do { \
arch_rwlock_t *__read_lock_ptr = (rw); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -197,7 +197,7 @@ do { \
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(arch_rwlock_t *lo
: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
({ \
register long result; \
\
@@ -235,7 +235,7 @@ __raw_write_lock_flags(arch_rwlock_t *lo
(result == 0); \
})
-static inline void __raw_write_unlock(arch_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
{
u8 *y = (u8 *)x;
barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(ar
#else /* !ASM_SUPPORTED */
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
-#define __raw_write_lock(l) \
+#define arch_write_lock(l) \
({ \
__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
__u32 *ia64_write_lock_ptr = (__u32 *) (l); \
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(ar
} while (ia64_val); \
})
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
({ \
__u64 ia64_val; \
__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(ar
(ia64_val == 0); \
})
-static inline void __raw_write_unlock(arch_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
{
barrier();
x->write_lock = 0;
@@ -273,7 +273,7 @@ static inline void __raw_write_unlock(ar
#endif /* !ASM_SUPPORTED */
-static inline int __raw_read_trylock(arch_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
{
union {
arch_rwlock_t lock;
Index: linux-2.6-tip/arch/m32r/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/m32r/include/asm/spinlock.h
+++ linux-2.6-tip/arch/m32r/include/asm/spinlock.h
@@ -140,15 +140,15 @@ static inline void arch_spin_unlock(arch
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(arch_
);
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(arch
);
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(arc
);
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(ar
);
}
-static inline int __raw_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t*)lock;
if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(arc
return 0;
}
-static inline int __raw_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -316,8 +316,8 @@ static inline int __raw_write_trylock(ar
return 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/mips/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/mips/include/asm/spinlock.h
+++ linux-2.6-tip/arch/mips/include/asm/spinlock.h
@@ -248,21 +248,21 @@ static inline unsigned int arch_spin_try
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
/*
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_write_can_lock(rw) (!(rw)->lock)
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_read_lock \n"
+ " .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 1b \n"
" addu %1, 1 \n"
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(arch_
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_lock \n"
+ " .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 2f \n"
" addu %1, 1 \n"
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(arch_
/* Note the use of sub, not subu which will make the kernel die with an
overflow exception if we ever try to unlock an rwlock that is already
unlocked or is being held by a writer. */
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(arc
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- "1: ll %1, %2 # __raw_read_unlock \n"
+ "1: ll %1, %2 # arch_read_unlock \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(arc
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_unlock \n"
+ " .set noreorder # arch_read_unlock \n"
"1: ll %1, %2 \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(arc
}
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_write_lock \n"
+ " .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 1b \n"
" lui %1, 0x8000 \n"
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(arch
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_write_lock \n"
+ " .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 2f \n"
" lui %1, 0x8000 \n"
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(arch
smp_llsc_mb();
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
__asm__ __volatile__(
- " # __raw_write_unlock \n"
+ " # arch_write_unlock \n"
" sw $0, %0 \n"
: "=m" (rw->lock)
: "m" (rw->lock)
: "memory");
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_read_trylock \n"
+ " .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(arc
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_trylock \n"
+ " .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(arc
return ret;
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_write_trylock \n"
+ " .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(ar
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_write_trylock \n"
+ " .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
@@ -480,8 +480,8 @@ static inline int __raw_write_trylock(ar
return ret;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/parisc/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/parisc/include/asm/spinlock.h
+++ linux-2.6-tip/arch/parisc/include/asm/spinlock.h
@@ -69,7 +69,7 @@ static inline int arch_spin_trylock(arch
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(arch_rwlock_t *rw)
+static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
@@ -81,7 +81,7 @@ static __inline__ void __raw_read_lock(
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(arch_rwlock_t *rw)
+static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
@@ -93,7 +93,7 @@ static __inline__ void __raw_read_unloc
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(arch_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
@@ -119,7 +119,7 @@ static __inline__ int __raw_read_trylock
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(arch_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
@@ -141,7 +141,7 @@ retry:
local_irq_restore(flags);
}
-static __inline__ void __raw_write_unlock(arch_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
rw->counter = 0;
arch_spin_unlock(&rw->lock);
@@ -149,7 +149,7 @@ static __inline__ void __raw_write_unloc
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(arch_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
int result = 0;
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_tryloc
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
return rw->counter >= 0;
}
@@ -182,13 +182,13 @@ static __inline__ int __raw_read_can_loc
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
return !rw->counter;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/powerpc/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/include/asm/spinlock.h
+++ linux-2.6-tip/arch/powerpc/include/asm/spinlock.h
@@ -166,8 +166,8 @@ extern void arch_spin_unlock_wait(arch_s
* read-locks.
*/
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
+#define arch_write_can_lock(rw) (!(rw)->lock)
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND "extsw %0,%0\n"
@@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_s
* This returns the old value in the lock + 1,
* so we got a read lock if the return value is > 0.
*/
-static inline long arch_read_trylock(arch_rwlock_t *rw)
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
long tmp;
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(arc
* This returns the old value in the lock,
* so we got the write lock if the return value is 0.
*/
-static inline long arch_write_trylock(arch_rwlock_t *rw)
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
long tmp, token;
@@ -225,10 +225,10 @@ static inline long arch_write_trylock(ar
return tmp;
}
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
while (1) {
- if (likely(arch_read_trylock(rw) > 0))
+ if (likely(__arch_read_trylock(rw) > 0))
break;
do {
HMT_low();
@@ -239,10 +239,10 @@ static inline void __raw_read_lock(arch_
}
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
while (1) {
- if (likely(arch_write_trylock(rw) == 0))
+ if (likely(__arch_write_trylock(rw) == 0))
break;
do {
HMT_low();
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(arch
}
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- return arch_read_trylock(rw) > 0;
+ return __arch_read_trylock(rw) > 0;
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- return arch_write_trylock(rw) == 0;
+ return __arch_write_trylock(rw) == 0;
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
long tmp;
@@ -280,15 +280,15 @@ static inline void __raw_read_unlock(arc
: "cr0", "xer", "memory");
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__("# write_unlock\n\t"
LWSYNC_ON_SMP: : :"memory");
rw->lock = 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) __spin_yield(lock)
#define arch_read_relax(lock) __rw_yield(lock)
Index: linux-2.6-tip/arch/s390/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/s390/include/asm/spinlock.h
+++ linux-2.6-tip/arch/s390/include/asm/spinlock.h
@@ -113,13 +113,13 @@ static inline void arch_spin_unlock(arch
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
+#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
@@ -128,7 +128,7 @@ extern void _raw_write_lock_wait(arch_rw
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(arch_
_raw_read_lock_wait(rw);
}
-static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
+static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags
_raw_read_lock_wait_flags(rw, flags);
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int old, cmp;
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(arc
} while (cmp != old);
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait(rw);
}
-static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
+static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait_flags(rw, flags);
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -181,7 +181,7 @@ static inline int __raw_read_trylock(arc
return _raw_read_trylock_retry(rw);
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
return 1;
Index: linux-2.6-tip/arch/s390/lib/spinlock.c
===================================================================
--- linux-2.6-tip.orig/arch/s390/lib/spinlock.c
+++ linux-2.6-tip/arch/s390/lib/spinlock.c
@@ -115,7 +115,7 @@ void _raw_read_lock_wait(arch_rwlock_t *
_raw_yield();
count = spin_retry;
}
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(arch_rwlo
_raw_yield();
count = spin_retry;
}
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
local_irq_disable();
@@ -151,7 +151,7 @@ int _raw_read_trylock_retry(arch_rwlock_
int count = spin_retry;
while (count-- > 0) {
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -170,7 +170,7 @@ void _raw_write_lock_wait(arch_rwlock_t
_raw_yield();
count = spin_retry;
}
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
return;
@@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(arch_rwl
_raw_yield();
count = spin_retry;
}
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
local_irq_disable();
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
@@ -202,7 +202,7 @@ int _raw_write_trylock_retry(arch_rwlock
int count = spin_retry;
while (count-- > 0) {
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
return 1;
Index: linux-2.6-tip/arch/sh/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/sh/include/asm/spinlock.h
+++ linux-2.6-tip/arch/sh/include/asm/spinlock.h
@@ -100,21 +100,21 @@ static inline int arch_spin_trylock(arch
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((x)->lock > 0)
+#define arch_read_can_lock(x) ((x)->lock > 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __raw_read_lock \n\t"
+ "movli.l @%1, %0 ! arch_read_lock \n\t"
"cmp/pl %0 \n\t"
"bf 1b \n\t"
"add #-1, %0 \n\t"
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(arch_
);
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __raw_read_unlock \n\t"
+ "movli.l @%1, %0 ! arch_read_unlock \n\t"
"add #1, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(arc
);
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __raw_write_lock \n\t"
+ "movli.l @%1, %0 ! arch_write_lock \n\t"
"cmp/hs %2, %0 \n\t"
"bf 1b \n\t"
"sub %2, %0 \n\t"
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(arch
);
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__ (
- "mov.l %1, @%0 ! __raw_write_unlock \n\t"
+ "mov.l %1, @%0 ! arch_write_unlock \n\t"
:
: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
: "t", "memory"
);
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_read_trylock \n\t"
+ "movli.l @%2, %0 ! arch_read_trylock \n\t"
"mov %0, %1 \n\t"
"cmp/pl %0 \n\t"
"bf 2f \n\t"
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(arc
return (oldval > 0);
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_write_trylock \n\t"
+ "movli.l @%2, %0 ! arch_write_trylock \n\t"
"mov %0, %1 \n\t"
"cmp/hs %3, %0 \n\t"
"bf 2f \n\t"
@@ -216,8 +216,8 @@ static inline int __raw_write_trylock(ar
return (oldval > (RW_LOCK_BIAS - 1));
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_32.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_32.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_32.h
@@ -76,7 +76,7 @@ static inline void arch_spin_unlock(arch
*
* Unfortunately this scheme limits us to ~16,000,000 cpus.
*/
-static inline void arch_read_lock(arch_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
{
register arch_rwlock_t *lp asm("g1");
lp = rw;
@@ -89,14 +89,14 @@ static inline void arch_read_lock(arch_r
: "g2", "g4", "memory", "cc");
}
-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- arch_read_lock(lock); \
+ __arch_read_lock(lock); \
local_irq_restore(flags); \
} while(0)
-static inline void arch_read_unlock(arch_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
register arch_rwlock_t *lp asm("g1");
lp = rw;
@@ -109,14 +109,14 @@ static inline void arch_read_unlock(arch
: "g2", "g4", "memory", "cc");
}
-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- arch_read_unlock(lock); \
+ __arch_read_unlock(lock); \
local_irq_restore(flags); \
} while(0)
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
register arch_rwlock_t *lp asm("g1");
lp = rw;
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(arch
*(volatile __u32 *)&lp->lock = ~0U;
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int val;
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(ar
return (val == 0);
}
-static inline int arch_read_trylock(arch_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
register arch_rwlock_t *lp asm("g1");
register int res asm("o0");
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(arch
return res;
}
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
({ unsigned long flags; \
int res; \
local_irq_save(flags); \
- res = arch_read_trylock(lock); \
+ res = __arch_read_trylock(lock); \
local_irq_restore(flags); \
res; \
})
-#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
#endif /* !(__ASSEMBLY__) */
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_64.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_64.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_64.h
@@ -210,17 +210,17 @@ static int inline arch_write_trylock(arc
return result;
}
-#define __raw_read_lock(p) arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p) arch_read_trylock(p)
-#define __raw_read_unlock(p) arch_read_unlock(p)
-#define __raw_write_lock(p) arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p) arch_write_unlock(p)
-#define __raw_write_trylock(p) arch_write_trylock(p)
+#define arch_read_lock(p) arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p) arch_read_trylock(p)
+#define arch_read_unlock(p) arch_read_unlock(p)
+#define arch_write_lock(p) arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p) arch_write_unlock(p)
+#define arch_write_trylock(p) arch_write_trylock(p)
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw) (!(rw)->lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/x86/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/spinlock.h
+++ linux-2.6-tip/arch/x86/include/asm/spinlock.h
@@ -232,7 +232,7 @@ static inline void arch_spin_unlock_wait
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_read_can_lock(arch_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
return (int)(lock)->lock > 0;
}
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(ar
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_write_can_lock(arch_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
return (lock)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
"jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(arch_
::LOCK_PTR_REG (rw) : "memory");
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
"jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(arch
::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}
-static inline int __raw_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(arc
return 0;
}
-static inline int __raw_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -284,19 +284,19 @@ static inline int __raw_write_trylock(ar
return 0;
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "addl %1, %0"
: "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/include/linux/rwlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/rwlock.h
+++ linux-2.6-tip/include/linux/rwlock.h
@@ -14,7 +14,7 @@
* Released under the General Public License (GPL).
*/
-extern int __lockfunc generic__raw_read_trylock(arch_rwlock_t *lock);
+extern int __lockfunc genericarch_read_trylock(arch_rwlock_t *lock);
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __rwlock_init(rwlock_t *lock, const char *name,
@@ -40,20 +40,20 @@ do { \
extern int _raw_write_trylock(rwlock_t *lock);
extern void _raw_write_unlock(rwlock_t *lock);
#else
-# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
# define _raw_read_lock_flags(lock, flags) \
- __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
+ arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock)
# define _raw_write_lock_flags(lock, flags) \
- __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
+ arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock)
#endif
-#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
+#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
/*
* Define the various rw_lock methods. Note we define these
Index: linux-2.6-tip/include/linux/spinlock_up.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_up.h
+++ linux-2.6-tip/include/linux/spinlock_up.h
@@ -49,12 +49,12 @@ static inline void arch_spin_unlock(arch
/*
* Read-write spinlocks. No debug version.
*/
-#define __raw_read_lock(lock) do { (void)(lock); } while (0)
-#define __raw_write_lock(lock) do { (void)(lock); } while (0)
-#define __raw_read_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_write_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_read_unlock(lock) do { (void)(lock); } while (0)
-#define __raw_write_unlock(lock) do { (void)(lock); } while (0)
+#define arch_read_lock(lock) do { (void)(lock); } while (0)
+#define arch_write_lock(lock) do { (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ (void)(lock); 1; })
+#define arch_read_unlock(lock) do { (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { (void)(lock); } while (0)
#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock) ((void)(lock), 0)
@@ -67,8 +67,8 @@ static inline void arch_spin_unlock(arch
#define arch_spin_is_contended(lock) (((void)(lock), 0))
-#define __raw_read_can_lock(lock) (((void)(lock), 1))
-#define __raw_write_can_lock(lock) (((void)(lock), 1))
+#define arch_read_can_lock(lock) (((void)(lock), 1))
+#define arch_write_can_lock(lock) (((void)(lock), 1))
#define arch_spin_unlock_wait(lock) \
do { cpu_relax(); } while (arch_spin_is_locked(lock))
Index: linux-2.6-tip/lib/spinlock_debug.c
===================================================================
--- linux-2.6-tip.orig/lib/spinlock_debug.c
+++ linux-2.6-tip/lib/spinlock_debug.c
@@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_read_trylock(&lock->raw_lock))
+ if (arch_read_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -196,12 +196,12 @@ static void __read_lock_debug(rwlock_t *
void _raw_read_lock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
- __raw_read_lock(&lock->raw_lock);
+ arch_read_lock(&lock->raw_lock);
}
int _raw_read_trylock(rwlock_t *lock)
{
- int ret = __raw_read_trylock(&lock->raw_lock);
+ int ret = arch_read_trylock(&lock->raw_lock);
#ifndef CONFIG_SMP
/*
@@ -215,7 +215,7 @@ int _raw_read_trylock(rwlock_t *lock)
void _raw_read_unlock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
- __raw_read_unlock(&lock->raw_lock);
+ arch_read_unlock(&lock->raw_lock);
}
static inline void debug_write_lock_before(rwlock_t *lock)
@@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_write_trylock(&lock->raw_lock))
+ if (arch_write_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -271,13 +271,13 @@ static void __write_lock_debug(rwlock_t
void _raw_write_lock(rwlock_t *lock)
{
debug_write_lock_before(lock);
- __raw_write_lock(&lock->raw_lock);
+ arch_write_lock(&lock->raw_lock);
debug_write_lock_after(lock);
}
int _raw_write_trylock(rwlock_t *lock)
{
- int ret = __raw_write_trylock(&lock->raw_lock);
+ int ret = arch_write_trylock(&lock->raw_lock);
if (ret)
debug_write_lock_after(lock);
@@ -293,5 +293,5 @@ int _raw_write_trylock(rwlock_t *lock)
void _raw_write_unlock(rwlock_t *lock)
{
debug_write_unlock(lock);
- __raw_write_unlock(&lock->raw_lock);
+ arch_write_unlock(&lock->raw_lock);
}
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 08/23] locking: Convert raw_rwlock functions to arch_rwlock
2009-12-06 18:02 ` [patch 08/23] locking: Convert raw_rwlock functions " Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
0 siblings, 0 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-convert-rwlock-arch-functions.patch --]
[-- Type: text/plain, Size: 50430 bytes --]
Name space cleanup for the rwlock functions: rename the architecture level __raw_read_*/__raw_write_* functions and macros to arch_read_*/arch_write_*. No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
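
For reference, a minimal sketch (not part of the diff below, and ignoring the
lockdep/debug variants) of what the generic rwlock layer forwards to after the
rename. example_read_lock()/example_read_unlock() are made-up names used only
for illustration; the real forwarding is done by the _raw_* macros in the
include/linux/rwlock.h hunk further down:

	/* Illustrative only: with CONFIG_DEBUG_SPINLOCK disabled the
	 * generic layer ends up calling the arch_ primitives on the
	 * embedded arch_rwlock_t, where it previously called __raw_*.
	 */
	static inline void example_read_lock(rwlock_t *lock)
	{
		preempt_disable();
		arch_read_lock(&lock->raw_lock);	/* was __raw_read_lock() */
	}

	static inline void example_read_unlock(rwlock_t *lock)
	{
		arch_read_unlock(&lock->raw_lock);	/* was __raw_read_unlock() */
		preempt_enable();
	}
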
arch/alpha/include/asm/spinlock.h | 20 ++++++-------
arch/arm/include/asm/spinlock.h | 20 ++++++-------
arch/blackfin/include/asm/spinlock.h | 40 +++++++++++++--------------
arch/cris/include/arch-v32/arch/spinlock.h | 16 +++++------
arch/ia64/include/asm/spinlock.h | 32 +++++++++++-----------
arch/m32r/include/asm/spinlock.h | 20 ++++++-------
arch/mips/include/asm/spinlock.h | 42 ++++++++++++++---------------
arch/parisc/include/asm/spinlock.h | 20 ++++++-------
arch/powerpc/include/asm/spinlock.h | 32 +++++++++++-----------
arch/s390/include/asm/spinlock.h | 20 ++++++-------
arch/s390/lib/spinlock.c | 12 ++++----
arch/sh/include/asm/spinlock.h | 32 +++++++++++-----------
arch/sparc/include/asm/spinlock_32.h | 32 +++++++++++-----------
arch/sparc/include/asm/spinlock_64.h | 20 ++++++-------
arch/x86/include/asm/spinlock.h | 20 ++++++-------
include/linux/rwlock.h | 22 +++++++--------
include/linux/spinlock_up.h | 16 +++++------
lib/spinlock_debug.c | 16 +++++------
18 files changed, 216 insertions(+), 216 deletions(-)
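
Callers of the generic read_lock()/write_lock() API are unaffected, since only
the internal arch level names change. A hedged usage sketch, where example_lock
and example_reader() are hypothetical and not taken from the tree:

	static DEFINE_RWLOCK(example_lock);

	static void example_reader(void)
	{
		read_lock(&example_lock);	/* still reaches arch_read_lock() */
		/* ... access read-mostly data ... */
		read_unlock(&example_lock);
	}
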
Index: linux-2.6-tip/arch/alpha/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/alpha/include/asm/spinlock.h
+++ linux-2.6-tip/arch/alpha/include/asm/spinlock.h
@@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch
/***********************************************************/
-static inline int __raw_read_can_lock(arch_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
return (lock->lock & 1) == 0;
}
-static inline int __raw_write_can_lock(arch_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
return lock->lock == 0;
}
-static inline void __raw_read_lock(arch_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(arch_
: "m" (*lock) : "memory");
}
-static inline void __raw_write_lock(arch_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
{
long regx;
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(arch
: "m" (*lock) : "memory");
}
-static inline int __raw_read_trylock(arch_rwlock_t * lock)
+static inline int arch_read_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(arc
return success;
}
-static inline int __raw_write_trylock(arch_rwlock_t * lock)
+static inline int arch_write_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(ar
return success;
}
-static inline void __raw_read_unlock(arch_rwlock_t * lock)
+static inline void arch_read_unlock(arch_rwlock_t * lock)
{
long regx;
__asm__ __volatile__(
@@ -160,14 +160,14 @@ static inline void __raw_read_unlock(arc
: "m" (*lock) : "memory");
}
-static inline void __raw_write_unlock(arch_rwlock_t * lock)
+static inline void arch_write_unlock(arch_rwlock_t * lock)
{
mb();
lock->lock = 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/arm/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/arm/include/asm/spinlock.h
+++ linux-2.6-tip/arch/arm/include/asm/spinlock.h
@@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch
* just write zero since the lock is exclusively held.
*/
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(arch
smp_mb();
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(ar
}
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
@@ -142,7 +142,7 @@ static inline void __raw_write_unlock(ar
}
/* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
/*
* Read locks are a bit more hairy:
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(ar
* currently active. However, we know we won't have any write
* locks.
*/
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(arch_
smp_mb();
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(arc
: "cc");
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2 = 1;
@@ -215,10 +215,10 @@ static inline int __raw_read_trylock(arc
}
/* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x) ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/blackfin/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/include/asm/spinlock.h
+++ linux-2.6-tip/arch/blackfin/include/asm/spinlock.h
@@ -17,12 +17,12 @@ asmlinkage int __raw_spin_is_locked_asm(
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_read_lock_asm(volatile int *ptr);
-asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_write_lock_asm(volatile int *ptr);
-asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
+asmlinkage void arch_read_lock_asm(volatile int *ptr);
+asmlinkage int arch_read_trylock_asm(volatile int *ptr);
+asmlinkage void arch_read_unlock_asm(volatile int *ptr);
+asmlinkage void arch_write_lock_asm(volatile int *ptr);
+asmlinkage int arch_write_trylock_asm(volatile int *ptr);
+asmlinkage void arch_write_unlock_asm(volatile int *ptr);
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
@@ -52,44 +52,44 @@ static inline void arch_spin_unlock_wait
cpu_relax();
}
-static inline int __raw_read_can_lock(arch_rwlock_t *rw)
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) > 0;
}
-static inline int __raw_write_can_lock(arch_rwlock_t *rw)
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
- __raw_read_lock_asm(&rw->lock);
+ arch_read_lock_asm(&rw->lock);
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- return __raw_read_trylock_asm(&rw->lock);
+ return arch_read_trylock_asm(&rw->lock);
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- __raw_read_unlock_asm(&rw->lock);
+ arch_read_unlock_asm(&rw->lock);
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
- __raw_write_lock_asm(&rw->lock);
+ arch_write_lock_asm(&rw->lock);
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- return __raw_write_trylock_asm(&rw->lock);
+ return arch_write_trylock_asm(&rw->lock);
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- __raw_write_unlock_asm(&rw->lock);
+ arch_write_unlock_asm(&rw->lock);
}
#define arch_spin_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/cris/include/arch-v32/arch/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/cris/include/arch-v32/arch/spinlock.h
+++ linux-2.6-tip/arch/cris/include/arch-v32/arch/spinlock.h
@@ -56,17 +56,17 @@ arch_spin_lock_flags(arch_spinlock_t *lo
*
*/
-static inline int __raw_read_can_lock(arch_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
{
return (int)(x)->lock > 0;
}
-static inline int __raw_write_can_lock(arch_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
{
return (x)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock == 0);
@@ -74,7 +74,7 @@ static inline void __raw_read_lock(arch
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
@@ -82,14 +82,14 @@ static inline void __raw_write_lock(arc
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
rw->lock++;
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
@@ -97,7 +97,7 @@ static inline void __raw_write_unlock(a
arch_spin_unlock(&rw->slock);
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
@@ -109,7 +109,7 @@ static inline int __raw_read_trylock(ar
return ret;
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
Index: linux-2.6-tip/arch/ia64/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/ia64/include/asm/spinlock.h
+++ linux-2.6-tip/arch/ia64/include/asm/spinlock.h
@@ -140,13 +140,13 @@ static inline void arch_spin_unlock_wait
__ticket_spin_unlock_wait(lock);
}
-#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1,%2\n"
@@ -169,13 +169,13 @@ __raw_read_lock_flags(arch_rwlock_t *loc
: "p6", "p7", "r2", "memory");
}
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
#else /* !ASM_SUPPORTED */
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-#define __raw_read_lock(rw) \
+#define arch_read_lock(rw) \
do { \
arch_rwlock_t *__read_lock_ptr = (rw); \
\
@@ -188,7 +188,7 @@ do { \
#endif /* !ASM_SUPPORTED */
-#define __raw_read_unlock(rw) \
+#define arch_read_unlock(rw) \
do { \
arch_rwlock_t *__read_lock_ptr = (rw); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -197,7 +197,7 @@ do { \
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(arch_rwlock_t *lo
: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
({ \
register long result; \
\
@@ -235,7 +235,7 @@ __raw_write_lock_flags(arch_rwlock_t *lo
(result == 0); \
})
-static inline void __raw_write_unlock(arch_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
{
u8 *y = (u8 *)x;
barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(ar
#else /* !ASM_SUPPORTED */
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
-#define __raw_write_lock(l) \
+#define arch_write_lock(l) \
({ \
__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
__u32 *ia64_write_lock_ptr = (__u32 *) (l); \
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(ar
} while (ia64_val); \
})
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
({ \
__u64 ia64_val; \
__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(ar
(ia64_val == 0); \
})
-static inline void __raw_write_unlock(arch_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
{
barrier();
x->write_lock = 0;
@@ -273,7 +273,7 @@ static inline void __raw_write_unlock(ar
#endif /* !ASM_SUPPORTED */
-static inline int __raw_read_trylock(arch_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
{
union {
arch_rwlock_t lock;
Index: linux-2.6-tip/arch/m32r/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/m32r/include/asm/spinlock.h
+++ linux-2.6-tip/arch/m32r/include/asm/spinlock.h
@@ -140,15 +140,15 @@ static inline void arch_spin_unlock(arch
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(arch_
);
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(arch
);
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(arc
);
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(ar
);
}
-static inline int __raw_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t*)lock;
if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(arc
return 0;
}
-static inline int __raw_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -316,8 +316,8 @@ static inline int __raw_write_trylock(ar
return 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/mips/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/mips/include/asm/spinlock.h
+++ linux-2.6-tip/arch/mips/include/asm/spinlock.h
@@ -248,21 +248,21 @@ static inline unsigned int arch_spin_try
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
/*
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_write_can_lock(rw) (!(rw)->lock)
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_read_lock \n"
+ " .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 1b \n"
" addu %1, 1 \n"
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(arch_
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_lock \n"
+ " .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 2f \n"
" addu %1, 1 \n"
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(arch_
/* Note the use of sub, not subu which will make the kernel die with an
overflow exception if we ever try to unlock an rwlock that is already
unlocked or is being held by a writer. */
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(arc
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- "1: ll %1, %2 # __raw_read_unlock \n"
+ "1: ll %1, %2 # arch_read_unlock \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(arc
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_unlock \n"
+ " .set noreorder # arch_read_unlock \n"
"1: ll %1, %2 \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(arc
}
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_write_lock \n"
+ " .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 1b \n"
" lui %1, 0x8000 \n"
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(arch
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_write_lock \n"
+ " .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 2f \n"
" lui %1, 0x8000 \n"
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(arch
smp_llsc_mb();
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
__asm__ __volatile__(
- " # __raw_write_unlock \n"
+ " # arch_write_unlock \n"
" sw $0, %0 \n"
: "=m" (rw->lock)
: "m" (rw->lock)
: "memory");
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_read_trylock \n"
+ " .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(arc
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_trylock \n"
+ " .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(arc
return ret;
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_write_trylock \n"
+ " .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(ar
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_write_trylock \n"
+ " .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
@@ -480,8 +480,8 @@ static inline int __raw_write_trylock(ar
return ret;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/parisc/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/parisc/include/asm/spinlock.h
+++ linux-2.6-tip/arch/parisc/include/asm/spinlock.h
@@ -69,7 +69,7 @@ static inline int arch_spin_trylock(arch
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(arch_rwlock_t *rw)
+static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
@@ -81,7 +81,7 @@ static __inline__ void __raw_read_lock(
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(arch_rwlock_t *rw)
+static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
@@ -93,7 +93,7 @@ static __inline__ void __raw_read_unloc
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(arch_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
@@ -119,7 +119,7 @@ static __inline__ int __raw_read_trylock
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(arch_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
@@ -141,7 +141,7 @@ retry:
local_irq_restore(flags);
}
-static __inline__ void __raw_write_unlock(arch_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
rw->counter = 0;
arch_spin_unlock(&rw->lock);
@@ -149,7 +149,7 @@ static __inline__ void __raw_write_unloc
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(arch_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
int result = 0;
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_tryloc
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
return rw->counter >= 0;
}
@@ -182,13 +182,13 @@ static __inline__ int __raw_read_can_loc
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
return !rw->counter;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/powerpc/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/include/asm/spinlock.h
+++ linux-2.6-tip/arch/powerpc/include/asm/spinlock.h
@@ -166,8 +166,8 @@ extern void arch_spin_unlock_wait(arch_s
* read-locks.
*/
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
+#define arch_write_can_lock(rw) (!(rw)->lock)
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND "extsw %0,%0\n"
@@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_s
* This returns the old value in the lock + 1,
* so we got a read lock if the return value is > 0.
*/
-static inline long arch_read_trylock(arch_rwlock_t *rw)
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
long tmp;
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(arc
* This returns the old value in the lock,
* so we got the write lock if the return value is 0.
*/
-static inline long arch_write_trylock(arch_rwlock_t *rw)
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
long tmp, token;
@@ -225,10 +225,10 @@ static inline long arch_write_trylock(ar
return tmp;
}
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
while (1) {
- if (likely(arch_read_trylock(rw) > 0))
+ if (likely(__arch_read_trylock(rw) > 0))
break;
do {
HMT_low();
@@ -239,10 +239,10 @@ static inline void __raw_read_lock(arch_
}
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
while (1) {
- if (likely(arch_write_trylock(rw) == 0))
+ if (likely(__arch_write_trylock(rw) == 0))
break;
do {
HMT_low();
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(arch
}
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- return arch_read_trylock(rw) > 0;
+ return __arch_read_trylock(rw) > 0;
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- return arch_write_trylock(rw) == 0;
+ return __arch_write_trylock(rw) == 0;
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
long tmp;
@@ -280,15 +280,15 @@ static inline void __raw_read_unlock(arc
: "cr0", "xer", "memory");
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__("# write_unlock\n\t"
LWSYNC_ON_SMP: : :"memory");
rw->lock = 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) __spin_yield(lock)
#define arch_read_relax(lock) __rw_yield(lock)
Index: linux-2.6-tip/arch/s390/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/s390/include/asm/spinlock.h
+++ linux-2.6-tip/arch/s390/include/asm/spinlock.h
@@ -113,13 +113,13 @@ static inline void arch_spin_unlock(arch
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
+#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
@@ -128,7 +128,7 @@ extern void _raw_write_lock_wait(arch_rw
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(arch_
_raw_read_lock_wait(rw);
}
-static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
+static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags
_raw_read_lock_wait_flags(rw, flags);
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int old, cmp;
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(arc
} while (cmp != old);
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait(rw);
}
-static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
+static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait_flags(rw, flags);
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -181,7 +181,7 @@ static inline int __raw_read_trylock(arc
return _raw_read_trylock_retry(rw);
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
return 1;
Index: linux-2.6-tip/arch/s390/lib/spinlock.c
===================================================================
--- linux-2.6-tip.orig/arch/s390/lib/spinlock.c
+++ linux-2.6-tip/arch/s390/lib/spinlock.c
@@ -115,7 +115,7 @@ void _raw_read_lock_wait(arch_rwlock_t *
_raw_yield();
count = spin_retry;
}
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(arch_rwlo
_raw_yield();
count = spin_retry;
}
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
local_irq_disable();
@@ -151,7 +151,7 @@ int _raw_read_trylock_retry(arch_rwlock_
int count = spin_retry;
while (count-- > 0) {
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -170,7 +170,7 @@ void _raw_write_lock_wait(arch_rwlock_t
_raw_yield();
count = spin_retry;
}
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
return;
@@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(arch_rwl
_raw_yield();
count = spin_retry;
}
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
local_irq_disable();
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
@@ -202,7 +202,7 @@ int _raw_write_trylock_retry(arch_rwlock
int count = spin_retry;
while (count-- > 0) {
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
return 1;
Index: linux-2.6-tip/arch/sh/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/sh/include/asm/spinlock.h
+++ linux-2.6-tip/arch/sh/include/asm/spinlock.h
@@ -100,21 +100,21 @@ static inline int arch_spin_trylock(arch
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((x)->lock > 0)
+#define arch_read_can_lock(x) ((x)->lock > 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __raw_read_lock \n\t"
+ "movli.l @%1, %0 ! arch_read_lock \n\t"
"cmp/pl %0 \n\t"
"bf 1b \n\t"
"add #-1, %0 \n\t"
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(arch_
);
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __raw_read_unlock \n\t"
+ "movli.l @%1, %0 ! arch_read_unlock \n\t"
"add #1, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(arc
);
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __raw_write_lock \n\t"
+ "movli.l @%1, %0 ! arch_write_lock \n\t"
"cmp/hs %2, %0 \n\t"
"bf 1b \n\t"
"sub %2, %0 \n\t"
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(arch
);
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__ (
- "mov.l %1, @%0 ! __raw_write_unlock \n\t"
+ "mov.l %1, @%0 ! arch_write_unlock \n\t"
:
: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
: "t", "memory"
);
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_read_trylock \n\t"
+ "movli.l @%2, %0 ! arch_read_trylock \n\t"
"mov %0, %1 \n\t"
"cmp/pl %0 \n\t"
"bf 2f \n\t"
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(arc
return (oldval > 0);
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_write_trylock \n\t"
+ "movli.l @%2, %0 ! arch_write_trylock \n\t"
"mov %0, %1 \n\t"
"cmp/hs %3, %0 \n\t"
"bf 2f \n\t"
@@ -216,8 +216,8 @@ static inline int __raw_write_trylock(ar
return (oldval > (RW_LOCK_BIAS - 1));
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_32.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_32.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_32.h
@@ -76,7 +76,7 @@ static inline void arch_spin_unlock(arch
*
* Unfortunately this scheme limits us to ~16,000,000 cpus.
*/
-static inline void arch_read_lock(arch_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
{
register arch_rwlock_t *lp asm("g1");
lp = rw;
@@ -89,14 +89,14 @@ static inline void arch_read_lock(arch_r
: "g2", "g4", "memory", "cc");
}
-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- arch_read_lock(lock); \
+ __arch_read_lock(lock); \
local_irq_restore(flags); \
} while(0)
-static inline void arch_read_unlock(arch_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
register arch_rwlock_t *lp asm("g1");
lp = rw;
@@ -109,14 +109,14 @@ static inline void arch_read_unlock(arch
: "g2", "g4", "memory", "cc");
}
-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- arch_read_unlock(lock); \
+ __arch_read_unlock(lock); \
local_irq_restore(flags); \
} while(0)
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
register arch_rwlock_t *lp asm("g1");
lp = rw;
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(arch
*(volatile __u32 *)&lp->lock = ~0U;
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int val;
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(ar
return (val == 0);
}
-static inline int arch_read_trylock(arch_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
register arch_rwlock_t *lp asm("g1");
register int res asm("o0");
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(arch
return res;
}
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
({ unsigned long flags; \
int res; \
local_irq_save(flags); \
- res = arch_read_trylock(lock); \
+ res = __arch_read_trylock(lock); \
local_irq_restore(flags); \
res; \
})
-#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
#endif /* !(__ASSEMBLY__) */
Index: linux-2.6-tip/arch/sparc/include/asm/spinlock_64.h
===================================================================
--- linux-2.6-tip.orig/arch/sparc/include/asm/spinlock_64.h
+++ linux-2.6-tip/arch/sparc/include/asm/spinlock_64.h
@@ -210,17 +210,17 @@ static int inline arch_write_trylock(arc
return result;
}
-#define __raw_read_lock(p) arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p) arch_read_trylock(p)
-#define __raw_read_unlock(p) arch_read_unlock(p)
-#define __raw_write_lock(p) arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p) arch_write_unlock(p)
-#define __raw_write_trylock(p) arch_write_trylock(p)
+#define arch_read_lock(p) arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p) arch_read_trylock(p)
+#define arch_read_unlock(p) arch_read_unlock(p)
+#define arch_write_lock(p) arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p) arch_write_unlock(p)
+#define arch_write_trylock(p) arch_write_trylock(p)
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw) (!(rw)->lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/arch/x86/include/asm/spinlock.h
===================================================================
--- linux-2.6-tip.orig/arch/x86/include/asm/spinlock.h
+++ linux-2.6-tip/arch/x86/include/asm/spinlock.h
@@ -232,7 +232,7 @@ static inline void arch_spin_unlock_wait
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_read_can_lock(arch_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
return (int)(lock)->lock > 0;
}
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(ar
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_write_can_lock(arch_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
return (lock)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
"jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(arch_
::LOCK_PTR_REG (rw) : "memory");
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
"jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(arch
::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}
-static inline int __raw_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(arc
return 0;
}
-static inline int __raw_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -284,19 +284,19 @@ static inline int __raw_write_trylock(ar
return 0;
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "addl %1, %0"
: "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Index: linux-2.6-tip/include/linux/rwlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/rwlock.h
+++ linux-2.6-tip/include/linux/rwlock.h
@@ -14,7 +14,7 @@
* Released under the General Public License (GPL).
*/
-extern int __lockfunc generic__raw_read_trylock(arch_rwlock_t *lock);
+extern int __lockfunc genericarch_read_trylock(arch_rwlock_t *lock);
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __rwlock_init(rwlock_t *lock, const char *name,
@@ -40,20 +40,20 @@ do { \
extern int _raw_write_trylock(rwlock_t *lock);
extern void _raw_write_unlock(rwlock_t *lock);
#else
-# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
# define _raw_read_lock_flags(lock, flags) \
- __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
+ arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock)
# define _raw_write_lock_flags(lock, flags) \
- __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
+ arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock)
#endif
-#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
+#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
/*
* Define the various rw_lock methods. Note we define these
Index: linux-2.6-tip/include/linux/spinlock_up.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_up.h
+++ linux-2.6-tip/include/linux/spinlock_up.h
@@ -49,12 +49,12 @@ static inline void arch_spin_unlock(arch
/*
* Read-write spinlocks. No debug version.
*/
-#define __raw_read_lock(lock) do { (void)(lock); } while (0)
-#define __raw_write_lock(lock) do { (void)(lock); } while (0)
-#define __raw_read_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_write_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_read_unlock(lock) do { (void)(lock); } while (0)
-#define __raw_write_unlock(lock) do { (void)(lock); } while (0)
+#define arch_read_lock(lock) do { (void)(lock); } while (0)
+#define arch_write_lock(lock) do { (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ (void)(lock); 1; })
+#define arch_read_unlock(lock) do { (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { (void)(lock); } while (0)
#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock) ((void)(lock), 0)
@@ -67,8 +67,8 @@ static inline void arch_spin_unlock(arch
#define arch_spin_is_contended(lock) (((void)(lock), 0))
-#define __raw_read_can_lock(lock) (((void)(lock), 1))
-#define __raw_write_can_lock(lock) (((void)(lock), 1))
+#define arch_read_can_lock(lock) (((void)(lock), 1))
+#define arch_write_can_lock(lock) (((void)(lock), 1))
#define arch_spin_unlock_wait(lock) \
do { cpu_relax(); } while (arch_spin_is_locked(lock))
Index: linux-2.6-tip/lib/spinlock_debug.c
===================================================================
--- linux-2.6-tip.orig/lib/spinlock_debug.c
+++ linux-2.6-tip/lib/spinlock_debug.c
@@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_read_trylock(&lock->raw_lock))
+ if (arch_read_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -196,12 +196,12 @@ static void __read_lock_debug(rwlock_t *
void _raw_read_lock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
- __raw_read_lock(&lock->raw_lock);
+ arch_read_lock(&lock->raw_lock);
}
int _raw_read_trylock(rwlock_t *lock)
{
- int ret = __raw_read_trylock(&lock->raw_lock);
+ int ret = arch_read_trylock(&lock->raw_lock);
#ifndef CONFIG_SMP
/*
@@ -215,7 +215,7 @@ int _raw_read_trylock(rwlock_t *lock)
void _raw_read_unlock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
- __raw_read_unlock(&lock->raw_lock);
+ arch_read_unlock(&lock->raw_lock);
}
static inline void debug_write_lock_before(rwlock_t *lock)
@@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_write_trylock(&lock->raw_lock))
+ if (arch_write_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -271,13 +271,13 @@ static void __write_lock_debug(rwlock_t
void _raw_write_lock(rwlock_t *lock)
{
debug_write_lock_before(lock);
- __raw_write_lock(&lock->raw_lock);
+ arch_write_lock(&lock->raw_lock);
debug_write_lock_after(lock);
}
int _raw_write_trylock(rwlock_t *lock)
{
- int ret = __raw_write_trylock(&lock->raw_lock);
+ int ret = arch_write_trylock(&lock->raw_lock);
if (ret)
debug_write_lock_after(lock);
@@ -293,5 +293,5 @@ int _raw_write_trylock(rwlock_t *lock)
void _raw_write_unlock(rwlock_t *lock)
{
debug_write_unlock(lock);
- __raw_write_unlock(&lock->raw_lock);
+ arch_write_unlock(&lock->raw_lock);
}
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 09/23] locking: Implement new raw_spinlock
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (7 preceding siblings ...)
2009-12-06 18:02 ` [patch 08/23] locking: Convert raw_rwlock functions " Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
2009-12-08 13:33 ` Yong Zhang
2009-12-06 18:02 ` [patch 10/23] locking: Further name space cleanups Thomas Gleixner
` (13 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-implement-raw-lock.patch --]
[-- Type: text/plain, Size: 28785 bytes --]
Now that the raw_spin name space is freed up, we can implement
raw_spinlock and the related functions which are used to annotate the
locks which are not converted to sleeping spinlocks in preempt-rt.
A side effect is that only such locks can be used with the low level
lock functions which circumvent lockdep.
For !rt, spin_* functions are mapped to the raw_spin* implementations.
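A minimal usage sketch of the new annotation (hypothetical lock name,
not part of this patch): a lock which must keep true spinning semantics
on PREEMPT_RT is declared and taken through the raw_ API introduced
below, while all other code keeps using the plain spinlock_t interface.

static DEFINE_RAW_SPINLOCK(my_core_lock);	/* hypothetical example lock */

static void my_critical_section(void)
{
	unsigned long flags;

	/* Must not sleep, even on -rt: use the raw_ variants */
	raw_spin_lock_irqsave(&my_core_lock, flags);
	/* ... short, non-sleeping critical section ... */
	raw_spin_unlock_irqrestore(&my_core_lock, flags);
}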
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/spinlock.h | 260 +++++++++++++++++++++++++++++----------
include/linux/spinlock_api_smp.h | 51 ++++---
include/linux/spinlock_api_up.h | 2
include/linux/spinlock_types.h | 49 +++++--
kernel/mutex-debug.h | 12 -
kernel/sched.c | 2
kernel/spinlock.c | 34 ++---
lib/spinlock_debug.c | 24 +--
8 files changed, 298 insertions(+), 136 deletions(-)
Index: linux-2.6-tip/include/linux/spinlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock.h
+++ linux-2.6-tip/include/linux/spinlock.h
@@ -80,7 +80,7 @@
#include <linux/spinlock_types.h>
/*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
*/
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
@@ -89,30 +89,30 @@
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define spin_lock_init(lock) \
+ extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
- __spin_lock_init((lock), #lock, &__key); \
+ __raw_spin_lock_init((lock), #lock, &__key); \
} while (0)
#else
-# define spin_lock_init(lock) \
- do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
+# define raw_spin_lock_init(lock) \
+ do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
-#define spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else
#ifdef arch_spin_is_contended
-#define spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
+#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
-#define spin_is_contended(lock) (((void)(lock), 0))
+#define raw_spin_is_contended(lock) (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif
@@ -122,22 +122,37 @@ static inline void smp_mb__after_lock(vo
#endif
/**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
*/
-#define spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
+ extern void _raw_spin_lock(raw_spinlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
+ extern int _raw_spin_trylock(raw_spinlock_t *lock);
+ extern void _raw_spin_unlock(raw_spinlock_t *lock);
#else
-# define _raw_spin_lock(lock) arch_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
- arch_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock) arch_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock) arch_spin_unlock(&(lock)->raw_lock)
+static inline void _raw_spin_lock(raw_spinlock_t *lock)
+{
+ arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+ arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+ return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+ arch_spin_unlock(&lock->raw_lock);
+}
#endif
/*
@@ -146,38 +161,38 @@ static inline void smp_mb__after_lock(vo
* various methods are defined as nops in the case they are not
* required.
*/
-#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
+#define raw_spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
-#define spin_lock(lock) _spin_lock(lock)
+#define raw_spin_lock(lock) _spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock) \
+# define raw_spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass) _spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-#define spin_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _spin_lock_irqsave(lock); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = _spin_lock_irqsave_nested(lock, subclass); \
} while (0)
#else
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = _spin_lock_irqsave(lock); \
@@ -186,45 +201,178 @@ static inline void smp_mb__after_lock(vo
#else
-#define spin_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_spin_lock_irqsave(lock, flags); \
} while (0)
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
- spin_lock_irqsave(lock, flags)
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
+ raw_spin_lock_irqsave(lock, flags)
#endif
-#define spin_lock_irq(lock) _spin_lock_irq(lock)
-#define spin_lock_bh(lock) _spin_lock_bh(lock)
-#define spin_unlock(lock) _spin_unlock(lock)
-#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
-
-#define spin_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
+#define raw_spin_lock_irq(lock) _spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock) _spin_lock_bh(lock)
+#define raw_spin_unlock(lock) _spin_unlock(lock)
+#define raw_spin_unlock_irq(lock) _spin_unlock_irq(lock)
+
+#define raw_spin_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
_spin_unlock_irqrestore(lock, flags); \
} while (0)
-#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock) _spin_unlock_bh(lock)
-#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
({ \
local_irq_disable(); \
- spin_trylock(lock) ? \
+ raw_spin_trylock(lock) ? \
1 : ({ local_irq_enable(); 0; }); \
})
-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
- spin_trylock(lock) ? \
+ raw_spin_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+ return &lock->rlock;
+}
+
+#define spin_lock_init(_lock) \
+do { \
+ spinlock_check(_lock); \
+ raw_spin_lock_init(&(_lock)->rlock); \
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+ raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+ return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass) \
+do { \
+ raw_spin_lock_nested(spinlock_check(lock), subclass); \
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock) \
+do { \
+ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+ raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags) \
+do { \
+ raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+do { \
+ raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+ raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+ raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+ return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+ return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
+})
+
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+ raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+ return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+ return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+ assert_raw_spin_locked(&lock->rlock);
+}
+
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
@@ -242,22 +390,4 @@ extern int _atomic_dec_and_lock(atomic_t
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock) (!spin_is_locked(lock))
-
-/* Include rwlock functions */
-#include <linux/rwlock.h>
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
#endif /* __LINUX_SPINLOCK_H */
Index: linux-2.6-tip/include/linux/spinlock_api_smp.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_api_smp.h
+++ linux-2.6-tip/include/linux/spinlock_api_smp.h
@@ -17,26 +17,29 @@
int in_lock_functions(unsigned long addr);
-#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
+#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
-void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc _spin_lock(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
__acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+void __lockfunc
+_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
__acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
+void __lockfunc _spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _spin_lock_irq(raw_spinlock_t *lock) __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+unsigned long __lockfunc
+_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
__acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+int __lockfunc _spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _spin_unlock(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
__releases(lock);
#ifdef CONFIG_INLINE_SPIN_LOCK
@@ -79,7 +82,7 @@ void __lockfunc _spin_unlock_irqrestore(
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __spin_trylock(raw_spinlock_t *lock)
{
preempt_disable();
if (_raw_spin_trylock(lock)) {
@@ -97,7 +100,7 @@ static inline int __spin_trylock(spinloc
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
{
unsigned long flags;
@@ -117,7 +120,7 @@ static inline unsigned long __spin_lock_
return flags;
}
-static inline void __spin_lock_irq(spinlock_t *lock)
+static inline void __spin_lock_irq(raw_spinlock_t *lock)
{
local_irq_disable();
preempt_disable();
@@ -125,7 +128,7 @@ static inline void __spin_lock_irq(spinl
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-static inline void __spin_lock_bh(spinlock_t *lock)
+static inline void __spin_lock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
@@ -133,7 +136,7 @@ static inline void __spin_lock_bh(spinlo
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-static inline void __spin_lock(spinlock_t *lock)
+static inline void __spin_lock(raw_spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -142,14 +145,14 @@ static inline void __spin_lock(spinlock_
#endif /* CONFIG_PREEMPT */
-static inline void __spin_unlock(spinlock_t *lock)
+static inline void __spin_unlock(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
preempt_enable();
}
-static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
@@ -158,7 +161,7 @@ static inline void __spin_unlock_irqrest
preempt_enable();
}
-static inline void __spin_unlock_irq(spinlock_t *lock)
+static inline void __spin_unlock_irq(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
@@ -166,7 +169,7 @@ static inline void __spin_unlock_irq(spi
preempt_enable();
}
-static inline void __spin_unlock_bh(spinlock_t *lock)
+static inline void __spin_unlock_bh(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
@@ -174,7 +177,7 @@ static inline void __spin_unlock_bh(spin
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
-static inline int __spin_trylock_bh(spinlock_t *lock)
+static inline int __spin_trylock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
Index: linux-2.6-tip/include/linux/spinlock_api_up.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_api_up.h
+++ linux-2.6-tip/include/linux/spinlock_api_up.h
@@ -16,7 +16,7 @@
#define in_lock_functions(ADDR) 0
-#define assert_spin_locked(lock) do { (void)(lock); } while (0)
+#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
/*
* In the UP-nondebug case there's no real locking going on, so the
Index: linux-2.6-tip/include/linux/spinlock_types.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_types.h
+++ linux-2.6-tip/include/linux/spinlock_types.h
@@ -17,7 +17,7 @@
#include <linux/lockdep.h>
-typedef struct {
+typedef struct raw_spinlock {
arch_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
@@ -29,7 +29,7 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} spinlock_t;
+} raw_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
@@ -42,17 +42,44 @@ typedef struct {
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- .magic = SPINLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- SPIN_DEP_MAP_INIT(lockname) }
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
#else
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- SPIN_DEP_MAP_INIT(lockname) }
+# define SPIN_DEBUG_INIT(lockname)
+#endif
+
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+ struct {
+ u8 __padding[LOCK_PADSIZE];
+ struct lockdep_map dep_map;
+ };
#endif
+ };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
/*
* SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
Index: linux-2.6-tip/kernel/mutex-debug.h
===================================================================
--- linux-2.6-tip.orig/kernel/mutex-debug.h
+++ linux-2.6-tip/kernel/mutex-debug.h
@@ -43,13 +43,13 @@ static inline void mutex_clear_owner(str
\
DEBUG_LOCKS_WARN_ON(in_interrupt()); \
local_irq_save(flags); \
- arch_spin_lock(&(lock)->raw_lock); \
+ arch_spin_lock(&(lock)->rlock.raw_lock);\
DEBUG_LOCKS_WARN_ON(l->magic != l); \
} while (0)
-#define spin_unlock_mutex(lock, flags) \
- do { \
- arch_spin_unlock(&(lock)->raw_lock); \
- local_irq_restore(flags); \
- preempt_check_resched(); \
+#define spin_unlock_mutex(lock, flags) \
+ do { \
+ arch_spin_unlock(&(lock)->rlock.raw_lock); \
+ local_irq_restore(flags); \
+ preempt_check_resched(); \
} while (0)
Index: linux-2.6-tip/kernel/sched.c
===================================================================
--- linux-2.6-tip.orig/kernel/sched.c
+++ linux-2.6-tip/kernel/sched.c
@@ -883,7 +883,7 @@ static inline void finish_lock_switch(st
{
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
- rq->lock.owner = current;
+ rq->lock.rlock.owner = current;
#endif
/*
* If we are tracking spinlock dependencies then we have to
Index: linux-2.6-tip/kernel/spinlock.c
===================================================================
--- linux-2.6-tip.orig/kernel/spinlock.c
+++ linux-2.6-tip/kernel/spinlock.c
@@ -32,6 +32,8 @@
* include/linux/spinlock_api_smp.h
*/
#else
+#define raw_read_can_lock(l) read_can_lock(l)
+#define raw_write_can_lock(l) write_can_lock(l)
/*
* We build the __lock_function inlines here. They are too large for
* inlining all over the place, but here is only one user per function
@@ -52,7 +54,7 @@ void __lockfunc __##op##_lock(locktype##
\
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (!op##_can_lock(lock) && (lock)->break_lock) \
+ while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
arch_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
@@ -72,7 +74,7 @@ unsigned long __lockfunc __##op##_lock_i
\
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (!op##_can_lock(lock) && (lock)->break_lock) \
+ while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
arch_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
@@ -107,14 +109,14 @@ void __lockfunc __##op##_lock_bh(locktyp
* __[spin|read|write]_lock_irqsave()
* __[spin|read|write]_lock_bh()
*/
-BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
#endif
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
-int __lockfunc _spin_trylock(spinlock_t *lock)
+int __lockfunc _spin_trylock(raw_spinlock_t *lock)
{
return __spin_trylock(lock);
}
@@ -122,7 +124,7 @@ EXPORT_SYMBOL(_spin_trylock);
#endif
#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
-int __lockfunc _spin_trylock_bh(spinlock_t *lock)
+int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock)
{
return __spin_trylock_bh(lock);
}
@@ -130,7 +132,7 @@ EXPORT_SYMBOL(_spin_trylock_bh);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK
-void __lockfunc _spin_lock(spinlock_t *lock)
+void __lockfunc _spin_lock(raw_spinlock_t *lock)
{
__spin_lock(lock);
}
@@ -138,7 +140,7 @@ EXPORT_SYMBOL(_spin_lock);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
{
return __spin_lock_irqsave(lock);
}
@@ -146,7 +148,7 @@ EXPORT_SYMBOL(_spin_lock_irqsave);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
+void __lockfunc _spin_lock_irq(raw_spinlock_t *lock)
{
__spin_lock_irq(lock);
}
@@ -154,7 +156,7 @@ EXPORT_SYMBOL(_spin_lock_irq);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK_BH
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
+void __lockfunc _spin_lock_bh(raw_spinlock_t *lock)
{
__spin_lock_bh(lock);
}
@@ -162,7 +164,7 @@ EXPORT_SYMBOL(_spin_lock_bh);
#endif
#ifndef CONFIG_INLINE_SPIN_UNLOCK
-void __lockfunc _spin_unlock(spinlock_t *lock)
+void __lockfunc _spin_unlock(raw_spinlock_t *lock)
{
__spin_unlock(lock);
}
@@ -170,7 +172,7 @@ EXPORT_SYMBOL(_spin_unlock);
#endif
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+void __lockfunc _spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
__spin_unlock_irqrestore(lock, flags);
}
@@ -178,7 +180,7 @@ EXPORT_SYMBOL(_spin_unlock_irqrestore);
#endif
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)
+void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock)
{
__spin_unlock_irq(lock);
}
@@ -186,7 +188,7 @@ EXPORT_SYMBOL(_spin_unlock_irq);
#endif
#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)
+void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock)
{
__spin_unlock_bh(lock);
}
@@ -339,7 +341,7 @@ EXPORT_SYMBOL(_write_unlock_bh);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
@@ -347,7 +349,7 @@ void __lockfunc _spin_lock_nested(spinlo
}
EXPORT_SYMBOL(_spin_lock_nested);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
+unsigned long __lockfunc _spin_lock_irqsave_nested(raw_spinlock_t *lock,
int subclass)
{
unsigned long flags;
@@ -361,7 +363,7 @@ unsigned long __lockfunc _spin_lock_irqs
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+void __lockfunc _spin_lock_nest_lock(raw_spinlock_t *lock,
struct lockdep_map *nest_lock)
{
preempt_disable();
Index: linux-2.6-tip/lib/spinlock_debug.c
===================================================================
--- linux-2.6-tip.orig/lib/spinlock_debug.c
+++ linux-2.6-tip/lib/spinlock_debug.c
@@ -13,8 +13,8 @@
#include <linux/delay.h>
#include <linux/module.h>
-void __spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key)
+void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -29,7 +29,7 @@ void __spin_lock_init(spinlock_t *lock,
lock->owner_cpu = -1;
}
-EXPORT_SYMBOL(__spin_lock_init);
+EXPORT_SYMBOL(__raw_spin_lock_init);
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const
EXPORT_SYMBOL(__rwlock_init);
-static void spin_bug(spinlock_t *lock, const char *msg)
+static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
struct task_struct *owner = NULL;
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, c
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
static inline void
-debug_spin_lock_before(spinlock_t *lock)
+debug_spin_lock_before(raw_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
lock, "cpu recursion");
}
-static inline void debug_spin_lock_after(spinlock_t *lock)
+static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
}
-static inline void debug_spin_unlock(spinlock_t *lock)
+static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
- SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+ SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spi
lock->owner_cpu = -1;
}
-static void __spin_lock_debug(spinlock_t *lock)
+static void __spin_lock_debug(raw_spinlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
@@ -125,7 +125,7 @@ static void __spin_lock_debug(spinlock_t
}
}
-void _raw_spin_lock(spinlock_t *lock)
+void _raw_spin_lock(raw_spinlock_t *lock)
{
debug_spin_lock_before(lock);
if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
@@ -133,7 +133,7 @@ void _raw_spin_lock(spinlock_t *lock)
debug_spin_lock_after(lock);
}
-int _raw_spin_trylock(spinlock_t *lock)
+int _raw_spin_trylock(raw_spinlock_t *lock)
{
int ret = arch_spin_trylock(&lock->raw_lock);
@@ -148,7 +148,7 @@ int _raw_spin_trylock(spinlock_t *lock)
return ret;
}
-void _raw_spin_unlock(spinlock_t *lock)
+void _raw_spin_unlock(raw_spinlock_t *lock)
{
debug_spin_unlock(lock);
arch_spin_unlock(&lock->raw_lock);
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 10/23] locking: Further name space cleanups
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (8 preceding siblings ...)
2009-12-06 18:02 ` [patch 09/23] locking: Implement new raw_spinlock Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` [patch 11/23] locking: Cleanup the name space completely Thomas Gleixner
` (12 subsequent siblings)
22 siblings, 0 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-fixup-namespace.patch --]
[-- Type: text/plain, Size: 17305 bytes --]
The name space hierarchy for the internal lock functions is now a bit
backwards. raw_spin* functions map to _spin* which use __spin*, while
we would like to have _raw_spin* and __raw_spin*.
_raw_spin* is already used by lock debugging, so rename those functions
to do_raw_spin* to free up the _raw_spin* name space.
No functional change.
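For orientation, a comment-form sketch of the lowest level entry points
after this rename (terminology only; the real definitions are in the
hunks below):

/*
 * After this patch the debug/arch entry points are named:
 *
 *   do_raw_spin_lock(raw_spinlock_t *)  - debug checks or arch_spin_lock()
 *   do_raw_read_lock(rwlock_t *)        - debug checks or arch_read_lock()
 *   do_raw_write_lock(rwlock_t *)       - debug checks or arch_write_lock()
 *
 * which frees the _raw_spin*, _raw_read* and _raw_write* names for the
 * following patch.
 */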
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/rwlock.h | 32 +++++++++++++++----------------
include/linux/rwlock_api_smp.h | 40 +++++++++++++++++++--------------------
include/linux/spinlock.h | 20 +++++++++----------
include/linux/spinlock_api_smp.h | 24 +++++++++++------------
kernel/sched.c | 2 -
kernel/spinlock.c | 12 +++++------
lib/spinlock_debug.c | 6 ++---
7 files changed, 68 insertions(+), 68 deletions(-)
Index: linux-2.6-tip/include/linux/rwlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/rwlock.h
+++ linux-2.6-tip/include/linux/rwlock.h
@@ -31,25 +31,25 @@ do { \
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
- extern int _raw_read_trylock(rwlock_t *lock);
- extern void _raw_read_unlock(rwlock_t *lock);
- extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
- extern int _raw_write_trylock(rwlock_t *lock);
- extern void _raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_read_lock(rwlock_t *lock);
+#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
+ extern int do_raw_read_trylock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock);
+ extern void do_raw_write_lock(rwlock_t *lock);
+#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
+ extern int do_raw_write_trylock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock);
#else
-# define _raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
+# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock_flags(lock, flags) \
arch_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
+# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_write_lock_flags(lock, flags) \
arch_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock)
#endif
#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
Index: linux-2.6-tip/include/linux/rwlock_api_smp.h
===================================================================
--- linux-2.6-tip.orig/include/linux/rwlock_api_smp.h
+++ linux-2.6-tip/include/linux/rwlock_api_smp.h
@@ -113,7 +113,7 @@ void __lockfunc _write_unlock_irqrestore
static inline int __read_trylock(rwlock_t *lock)
{
preempt_disable();
- if (_raw_read_trylock(lock)) {
+ if (do_raw_read_trylock(lock)) {
rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -124,7 +124,7 @@ static inline int __read_trylock(rwlock_
static inline int __write_trylock(rwlock_t *lock)
{
preempt_disable();
- if (_raw_write_trylock(lock)) {
+ if (do_raw_write_trylock(lock)) {
rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -143,7 +143,7 @@ static inline void __read_lock(rwlock_t
{
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
@@ -153,8 +153,8 @@ static inline unsigned long __read_lock_
local_irq_save(flags);
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
- _raw_read_lock_flags, &flags);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
+ do_raw_read_lock_flags, &flags);
return flags;
}
@@ -163,7 +163,7 @@ static inline void __read_lock_irq(rwloc
local_irq_disable();
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
static inline void __read_lock_bh(rwlock_t *lock)
@@ -171,7 +171,7 @@ static inline void __read_lock_bh(rwlock
local_bh_disable();
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
@@ -181,8 +181,8 @@ static inline unsigned long __write_lock
local_irq_save(flags);
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
- _raw_write_lock_flags, &flags);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
+ do_raw_write_lock_flags, &flags);
return flags;
}
@@ -191,7 +191,7 @@ static inline void __write_lock_irq(rwlo
local_irq_disable();
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
static inline void __write_lock_bh(rwlock_t *lock)
@@ -199,14 +199,14 @@ static inline void __write_lock_bh(rwloc
local_bh_disable();
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
static inline void __write_lock(rwlock_t *lock)
{
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
#endif /* CONFIG_PREEMPT */
@@ -214,21 +214,21 @@ static inline void __write_lock(rwlock_t
static inline void __write_unlock(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
+ do_raw_write_unlock(lock);
preempt_enable();
}
static inline void __read_unlock(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_read_unlock(lock);
preempt_enable();
}
static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_read_unlock(lock);
local_irq_restore(flags);
preempt_enable();
}
@@ -236,7 +236,7 @@ static inline void __read_unlock_irqrest
static inline void __read_unlock_irq(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_read_unlock(lock);
local_irq_enable();
preempt_enable();
}
@@ -244,7 +244,7 @@ static inline void __read_unlock_irq(rwl
static inline void __read_unlock_bh(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_read_unlock(lock);
preempt_enable_no_resched();
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
@@ -253,7 +253,7 @@ static inline void __write_unlock_irqres
unsigned long flags)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
+ do_raw_write_unlock(lock);
local_irq_restore(flags);
preempt_enable();
}
@@ -261,7 +261,7 @@ static inline void __write_unlock_irqres
static inline void __write_unlock_irq(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
+ do_raw_write_unlock(lock);
local_irq_enable();
preempt_enable();
}
@@ -269,7 +269,7 @@ static inline void __write_unlock_irq(rw
static inline void __write_unlock_bh(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
+ do_raw_write_unlock(lock);
preempt_enable_no_resched();
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
Index: linux-2.6-tip/include/linux/spinlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock.h
+++ linux-2.6-tip/include/linux/spinlock.h
@@ -89,13 +89,13 @@
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ extern void _do_raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key);
# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
- __raw_spin_lock_init((lock), #lock, &__key); \
+ _do_raw_spin_lock_init((lock), #lock, &__key); \
} while (0)
#else
@@ -128,28 +128,28 @@ static inline void smp_mb__after_lock(vo
#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(raw_spinlock_t *lock);
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(raw_spinlock_t *lock);
- extern void _raw_spin_unlock(raw_spinlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock);
+#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
+ extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock);
#else
-static inline void _raw_spin_lock(raw_spinlock_t *lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock)
{
arch_spin_lock(&lock->raw_lock);
}
static inline void
-_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
{
arch_spin_lock_flags(&lock->raw_lock, *flags);
}
-static inline int _raw_spin_trylock(raw_spinlock_t *lock)
+static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
return arch_spin_trylock(&(lock)->raw_lock);
}
-static inline void _raw_spin_unlock(raw_spinlock_t *lock)
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
{
arch_spin_unlock(&lock->raw_lock);
}
Index: linux-2.6-tip/include/linux/spinlock_api_smp.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_api_smp.h
+++ linux-2.6-tip/include/linux/spinlock_api_smp.h
@@ -85,7 +85,7 @@ _spin_unlock_irqrestore(raw_spinlock_t *
static inline int __spin_trylock(raw_spinlock_t *lock)
{
preempt_disable();
- if (_raw_spin_trylock(lock)) {
+ if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -109,13 +109,13 @@ static inline unsigned long __spin_lock_
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
/*
* On lockdep we dont want the hand-coded irq-enable of
- * _raw_spin_lock_flags() code, because lockdep assumes
+ * do_raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
#ifdef CONFIG_LOCKDEP
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
- _raw_spin_lock_flags(lock, &flags);
+ do_raw_spin_lock_flags(lock, &flags);
#endif
return flags;
}
@@ -125,7 +125,7 @@ static inline void __spin_lock_irq(raw_s
local_irq_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
static inline void __spin_lock_bh(raw_spinlock_t *lock)
@@ -133,14 +133,14 @@ static inline void __spin_lock_bh(raw_sp
local_bh_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
static inline void __spin_lock(raw_spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
#endif /* CONFIG_PREEMPT */
@@ -148,7 +148,7 @@ static inline void __spin_lock(raw_spinl
static inline void __spin_unlock(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
preempt_enable();
}
@@ -156,7 +156,7 @@ static inline void __spin_unlock_irqrest
unsigned long flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
}
@@ -164,7 +164,7 @@ static inline void __spin_unlock_irqrest
static inline void __spin_unlock_irq(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
}
@@ -172,7 +172,7 @@ static inline void __spin_unlock_irq(raw
static inline void __spin_unlock_bh(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
preempt_enable_no_resched();
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
@@ -181,7 +181,7 @@ static inline int __spin_trylock_bh(raw_
{
local_bh_disable();
preempt_disable();
- if (_raw_spin_trylock(lock)) {
+ if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
Index: linux-2.6-tip/kernel/sched.c
===================================================================
--- linux-2.6-tip.orig/kernel/sched.c
+++ linux-2.6-tip/kernel/sched.c
@@ -6703,7 +6703,7 @@ SYSCALL_DEFINE0(sched_yield)
*/
__release(rq->lock);
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
- _raw_spin_unlock(&rq->lock);
+ do_raw_spin_unlock(&rq->lock);
preempt_enable_no_resched();
schedule();
Index: linux-2.6-tip/kernel/spinlock.c
===================================================================
--- linux-2.6-tip.orig/kernel/spinlock.c
+++ linux-2.6-tip/kernel/spinlock.c
@@ -48,7 +48,7 @@ void __lockfunc __##op##_lock(locktype##
{ \
for (;;) { \
preempt_disable(); \
- if (likely(_raw_##op##_trylock(lock))) \
+ if (likely(do_raw_##op##_trylock(lock))) \
break; \
preempt_enable(); \
\
@@ -67,7 +67,7 @@ unsigned long __lockfunc __##op##_lock_i
for (;;) { \
preempt_disable(); \
local_irq_save(flags); \
- if (likely(_raw_##op##_trylock(lock))) \
+ if (likely(do_raw_##op##_trylock(lock))) \
break; \
local_irq_restore(flags); \
preempt_enable(); \
@@ -345,7 +345,7 @@ void __lockfunc _spin_lock_nested(raw_sp
{
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nested);
@@ -357,8 +357,8 @@ unsigned long __lockfunc _spin_lock_irqs
local_irq_save(flags);
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
- _raw_spin_lock_flags, &flags);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
+ do_raw_spin_lock_flags, &flags);
return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);
@@ -368,7 +368,7 @@ void __lockfunc _spin_lock_nest_lock(raw
{
preempt_disable();
spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nest_lock);
Index: linux-2.6-tip/lib/spinlock_debug.c
===================================================================
--- linux-2.6-tip.orig/lib/spinlock_debug.c
+++ linux-2.6-tip/lib/spinlock_debug.c
@@ -193,13 +193,13 @@ static void __read_lock_debug(rwlock_t *
}
#endif
-void _raw_read_lock(rwlock_t *lock)
+void do_raw_read_lock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
arch_read_lock(&lock->raw_lock);
}
-int _raw_read_trylock(rwlock_t *lock)
+int do_raw_read_trylock(rwlock_t *lock)
{
int ret = arch_read_trylock(&lock->raw_lock);
@@ -212,7 +212,7 @@ int _raw_read_trylock(rwlock_t *lock)
return ret;
}
-void _raw_read_unlock(rwlock_t *lock)
+void do_raw_read_unlock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
arch_read_unlock(&lock->raw_lock);
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 11/23] locking: Cleanup the name space completely
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (9 preceding siblings ...)
2009-12-06 18:02 ` [patch 10/23] locking: Further name space cleanups Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` [patch 12/23] bkl: Fixup core_lock fallout Thomas Gleixner
` (11 subsequent siblings)
22 siblings, 0 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: locking-fixup-namespace-2.patch --]
[-- Type: text/plain, Size: 41049 bytes --]
Make the name space hierarchy of locking functions consistent:
raw_spin* -> _raw_spin* -> __raw_spin*
No functional change.
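
For orientation, the path a plain raw_spin_lock() takes after this patch can be
condensed from the hunks below (SMP case, no CONFIG_INLINE_* options); this is
only a sketch assembled from the diff, not additional code:

/* include/linux/spinlock.h: the lock-type API maps onto the _raw_ entry point */
#define raw_spin_lock(lock)	_raw_spin_lock(lock)

/* kernel/spinlock.c: out-of-line _raw_ entry point, exported for modules */
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);

/* include/linux/spinlock_api_smp.h: generic __raw_ implementation, which
 * finally dispatches to do_raw_spin_lock() and the arch_spin_* backend */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}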
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/rwlock.h | 50 +++++-----
include/linux/rwlock_api_smp.h | 113 +++++++++++-----------
include/linux/spinlock.h | 37 ++++---
include/linux/spinlock_api_smp.h | 77 +++++++--------
include/linux/spinlock_api_up.h | 64 ++++++-------
kernel/spinlock.c | 192 +++++++++++++++++++--------------------
6 files changed, 273 insertions(+), 260 deletions(-)
Index: linux-2.6-tip/include/linux/rwlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/rwlock.h
+++ linux-2.6-tip/include/linux/rwlock.h
@@ -40,7 +40,7 @@ do { \
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock);
#else
-# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
# define do_raw_read_lock_flags(lock, flags) \
arch_read_lock_flags(&(lock)->raw_lock, *(flags))
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
@@ -60,23 +60,23 @@ do { \
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
* methods are defined as nops in the case they are not required.
*/
-#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
+#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
-#define write_lock(lock) _write_lock(lock)
-#define read_lock(lock) _read_lock(lock)
+#define write_lock(lock) _raw_write_lock(lock)
+#define read_lock(lock) _raw_read_lock(lock)
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define read_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- flags = _read_lock_irqsave(lock); \
+ flags = _raw_read_lock_irqsave(lock); \
} while (0)
#define write_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- flags = _write_lock_irqsave(lock); \
+ flags = _raw_write_lock_irqsave(lock); \
} while (0)
#else
@@ -84,38 +84,38 @@ do { \
#define read_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- _read_lock_irqsave(lock, flags); \
+ _raw_read_lock_irqsave(lock, flags); \
} while (0)
#define write_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- _write_lock_irqsave(lock, flags); \
+ _raw_write_lock_irqsave(lock, flags); \
} while (0)
#endif
-#define read_lock_irq(lock) _read_lock_irq(lock)
-#define read_lock_bh(lock) _read_lock_bh(lock)
-#define write_lock_irq(lock) _write_lock_irq(lock)
-#define write_lock_bh(lock) _write_lock_bh(lock)
-#define read_unlock(lock) _read_unlock(lock)
-#define write_unlock(lock) _write_unlock(lock)
-#define read_unlock_irq(lock) _read_unlock_irq(lock)
-#define write_unlock_irq(lock) _write_unlock_irq(lock)
-
-#define read_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_unlock_irqrestore(lock, flags); \
+#define read_lock_irq(lock) _raw_read_lock_irq(lock)
+#define read_lock_bh(lock) _raw_read_lock_bh(lock)
+#define write_lock_irq(lock) _raw_write_lock_irq(lock)
+#define write_lock_bh(lock) _raw_write_lock_bh(lock)
+#define read_unlock(lock) _raw_read_unlock(lock)
+#define write_unlock(lock) _raw_write_unlock(lock)
+#define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
+#define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_unlock_irqrestore(lock, flags); \
} while (0)
-#define read_unlock_bh(lock) _read_unlock_bh(lock)
+#define read_unlock_bh(lock) _raw_read_unlock_bh(lock)
#define write_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- _write_unlock_irqrestore(lock, flags); \
+ _raw_write_unlock_irqrestore(lock, flags); \
} while (0)
-#define write_unlock_bh(lock) _write_unlock_bh(lock)
+#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
#define write_trylock_irqsave(lock, flags) \
({ \
Index: linux-2.6-tip/include/linux/rwlock_api_smp.h
===================================================================
--- linux-2.6-tip.orig/include/linux/rwlock_api_smp.h
+++ linux-2.6-tip/include/linux/rwlock_api_smp.h
@@ -15,102 +15,106 @@
* Released under the General Public License (GPL).
*/
-void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
__acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
__acquires(lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+int __lockfunc _raw_read_trylock(rwlock_t *lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock);
+void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+void __lockfunc
+_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
#ifdef CONFIG_INLINE_READ_LOCK
-#define _read_lock(lock) __read_lock(lock)
+#define _raw_read_lock(lock) __raw_read_lock(lock)
#endif
#ifdef CONFIG_INLINE_WRITE_LOCK
-#define _write_lock(lock) __write_lock(lock)
+#define _raw_write_lock(lock) __raw_write_lock(lock)
#endif
#ifdef CONFIG_INLINE_READ_LOCK_BH
-#define _read_lock_bh(lock) __read_lock_bh(lock)
+#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
#endif
#ifdef CONFIG_INLINE_WRITE_LOCK_BH
-#define _write_lock_bh(lock) __write_lock_bh(lock)
+#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
#endif
#ifdef CONFIG_INLINE_READ_LOCK_IRQ
-#define _read_lock_irq(lock) __read_lock_irq(lock)
+#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
#endif
#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
-#define _write_lock_irq(lock) __write_lock_irq(lock)
+#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
#endif
#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
-#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
+#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
#endif
#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
#endif
#ifdef CONFIG_INLINE_READ_TRYLOCK
-#define _read_trylock(lock) __read_trylock(lock)
+#define _raw_read_trylock(lock) __raw_read_trylock(lock)
#endif
#ifdef CONFIG_INLINE_WRITE_TRYLOCK
-#define _write_trylock(lock) __write_trylock(lock)
+#define _raw_write_trylock(lock) __raw_write_trylock(lock)
#endif
#ifdef CONFIG_INLINE_READ_UNLOCK
-#define _read_unlock(lock) __read_unlock(lock)
+#define _raw_read_unlock(lock) __raw_read_unlock(lock)
#endif
#ifdef CONFIG_INLINE_WRITE_UNLOCK
-#define _write_unlock(lock) __write_unlock(lock)
+#define _raw_write_unlock(lock) __raw_write_unlock(lock)
#endif
#ifdef CONFIG_INLINE_READ_UNLOCK_BH
-#define _read_unlock_bh(lock) __read_unlock_bh(lock)
+#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
#endif
#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
-#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
#endif
#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
-#define _read_unlock_irq(lock) __read_unlock_irq(lock)
+#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
#endif
#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
#endif
#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __raw_read_unlock_irqrestore(lock, flags)
#endif
#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __raw_write_unlock_irqrestore(lock, flags)
#endif
-static inline int __read_trylock(rwlock_t *lock)
+static inline int __raw_read_trylock(rwlock_t *lock)
{
preempt_disable();
if (do_raw_read_trylock(lock)) {
@@ -121,7 +125,7 @@ static inline int __read_trylock(rwlock_
return 0;
}
-static inline int __write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(rwlock_t *lock)
{
preempt_disable();
if (do_raw_write_trylock(lock)) {
@@ -139,14 +143,14 @@ static inline int __write_trylock(rwlock
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-static inline void __read_lock(rwlock_t *lock)
+static inline void __raw_read_lock(rwlock_t *lock)
{
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
-static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
+static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
{
unsigned long flags;
@@ -158,7 +162,7 @@ static inline unsigned long __read_lock_
return flags;
}
-static inline void __read_lock_irq(rwlock_t *lock)
+static inline void __raw_read_lock_irq(rwlock_t *lock)
{
local_irq_disable();
preempt_disable();
@@ -166,7 +170,7 @@ static inline void __read_lock_irq(rwloc
LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
-static inline void __read_lock_bh(rwlock_t *lock)
+static inline void __raw_read_lock_bh(rwlock_t *lock)
{
local_bh_disable();
preempt_disable();
@@ -174,7 +178,7 @@ static inline void __read_lock_bh(rwlock
LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
-static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
+static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
{
unsigned long flags;
@@ -186,7 +190,7 @@ static inline unsigned long __write_lock
return flags;
}
-static inline void __write_lock_irq(rwlock_t *lock)
+static inline void __raw_write_lock_irq(rwlock_t *lock)
{
local_irq_disable();
preempt_disable();
@@ -194,7 +198,7 @@ static inline void __write_lock_irq(rwlo
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
-static inline void __write_lock_bh(rwlock_t *lock)
+static inline void __raw_write_lock_bh(rwlock_t *lock)
{
local_bh_disable();
preempt_disable();
@@ -202,7 +206,7 @@ static inline void __write_lock_bh(rwloc
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
-static inline void __write_lock(rwlock_t *lock)
+static inline void __raw_write_lock(rwlock_t *lock)
{
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -211,21 +215,22 @@ static inline void __write_lock(rwlock_t
#endif /* CONFIG_PREEMPT */
-static inline void __write_unlock(rwlock_t *lock)
+static inline void __raw_write_unlock(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_write_unlock(lock);
preempt_enable();
}
-static inline void __read_unlock(rwlock_t *lock)
+static inline void __raw_read_unlock(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_read_unlock(lock);
preempt_enable();
}
-static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+static inline void
+__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_read_unlock(lock);
@@ -233,7 +238,7 @@ static inline void __read_unlock_irqrest
preempt_enable();
}
-static inline void __read_unlock_irq(rwlock_t *lock)
+static inline void __raw_read_unlock_irq(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_read_unlock(lock);
@@ -241,7 +246,7 @@ static inline void __read_unlock_irq(rwl
preempt_enable();
}
-static inline void __read_unlock_bh(rwlock_t *lock)
+static inline void __raw_read_unlock_bh(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_read_unlock(lock);
@@ -249,7 +254,7 @@ static inline void __read_unlock_bh(rwlo
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
-static inline void __write_unlock_irqrestore(rwlock_t *lock,
+static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
unsigned long flags)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
@@ -258,7 +263,7 @@ static inline void __write_unlock_irqres
preempt_enable();
}
-static inline void __write_unlock_irq(rwlock_t *lock)
+static inline void __raw_write_unlock_irq(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_write_unlock(lock);
@@ -266,7 +271,7 @@ static inline void __write_unlock_irq(rw
preempt_enable();
}
-static inline void __write_unlock_bh(rwlock_t *lock)
+static inline void __raw_write_unlock_bh(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
do_raw_write_unlock(lock);
Index: linux-2.6-tip/include/linux/spinlock.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock.h
+++ linux-2.6-tip/include/linux/spinlock.h
@@ -161,20 +161,22 @@ static inline void do_raw_spin_unlock(ra
* various methods are defined as nops in the case they are not
* required.
*/
-#define raw_spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
+#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
-#define raw_spin_lock(lock) _spin_lock(lock)
+#define raw_spin_lock(lock) _raw_spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define raw_spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define raw_spin_lock_nested(lock, subclass) \
+ _raw_spin_lock_nested(lock, subclass)
+
# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
- _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
-# define raw_spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define raw_spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -182,20 +184,20 @@ static inline void do_raw_spin_unlock(ra
#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
+ flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave_nested(lock, subclass); \
+ flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
+ flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#endif
@@ -204,7 +206,7 @@ static inline void do_raw_spin_unlock(ra
#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- _spin_lock_irqsave(lock, flags); \
+ _raw_spin_lock_irqsave(lock, flags); \
} while (0)
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
@@ -212,19 +214,20 @@ static inline void do_raw_spin_unlock(ra
#endif
-#define raw_spin_lock_irq(lock) _spin_lock_irq(lock)
-#define raw_spin_lock_bh(lock) _spin_lock_bh(lock)
-#define raw_spin_unlock(lock) _spin_unlock(lock)
-#define raw_spin_unlock_irq(lock) _spin_unlock_irq(lock)
+#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
+#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
#define raw_spin_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- _spin_unlock_irqrestore(lock, flags); \
+ _raw_spin_unlock_irqrestore(lock, flags); \
} while (0)
-#define raw_spin_unlock_bh(lock) _spin_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
-#define raw_spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) \
+ __cond_lock(lock, _raw_spin_trylock_bh(lock))
#define raw_spin_trylock_irq(lock) \
({ \
Index: linux-2.6-tip/include/linux/spinlock_api_smp.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_api_smp.h
+++ linux-2.6-tip/include/linux/spinlock_api_smp.h
@@ -19,70 +19,71 @@ int in_lock_functions(unsigned long addr
#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
-void __lockfunc _spin_lock(raw_spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
- __acquires(lock);
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
void __lockfunc
-_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
- __acquires(lock);
-void __lockfunc _spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_irq(raw_spinlock_t *lock) __acquires(lock);
+_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
+ __acquires(lock);
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
+ __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
- __acquires(lock);
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+ __acquires(lock);
unsigned long __lockfunc
-_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
- __acquires(lock);
-int __lockfunc _spin_trylock(raw_spinlock_t *lock);
-int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock);
-void __lockfunc _spin_unlock(raw_spinlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
+_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
void __lockfunc
-_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
- __releases(lock);
+_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
+ __releases(lock);
#ifdef CONFIG_INLINE_SPIN_LOCK
-#define _spin_lock(lock) __spin_lock(lock)
+#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_BH
-#define _spin_lock_bh(lock) __spin_lock_bh(lock)
+#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
-#define _spin_lock_irq(lock) __spin_lock_irq(lock)
+#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
+#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_TRYLOCK
-#define _spin_trylock(lock) __spin_trylock(lock)
+#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
-#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK
-#define _spin_unlock(lock) __spin_unlock(lock)
+#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
-#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
+#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
+#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
+#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif
-static inline int __spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
preempt_disable();
if (do_raw_spin_trylock(lock)) {
@@ -100,7 +101,7 @@ static inline int __spin_trylock(raw_spi
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
+static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
unsigned long flags;
@@ -120,7 +121,7 @@ static inline unsigned long __spin_lock_
return flags;
}
-static inline void __spin_lock_irq(raw_spinlock_t *lock)
+static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
local_irq_disable();
preempt_disable();
@@ -128,7 +129,7 @@ static inline void __spin_lock_irq(raw_s
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-static inline void __spin_lock_bh(raw_spinlock_t *lock)
+static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
@@ -136,7 +137,7 @@ static inline void __spin_lock_bh(raw_sp
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-static inline void __spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -145,14 +146,14 @@ static inline void __spin_lock(raw_spinl
#endif /* CONFIG_PREEMPT */
-static inline void __spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
preempt_enable();
}
-static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
+static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
@@ -161,7 +162,7 @@ static inline void __spin_unlock_irqrest
preempt_enable();
}
-static inline void __spin_unlock_irq(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -169,7 +170,7 @@ static inline void __spin_unlock_irq(raw
preempt_enable();
}
-static inline void __spin_unlock_bh(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -177,7 +178,7 @@ static inline void __spin_unlock_bh(raw_
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
-static inline int __spin_trylock_bh(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
Index: linux-2.6-tip/include/linux/spinlock_api_up.h
===================================================================
--- linux-2.6-tip.orig/include/linux/spinlock_api_up.h
+++ linux-2.6-tip/include/linux/spinlock_api_up.h
@@ -40,7 +40,8 @@
do { preempt_enable(); __release(lock); (void)(lock); } while (0)
#define __UNLOCK_BH(lock) \
- do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+ do { preempt_enable_no_resched(); local_bh_enable(); \
+ __release(lock); (void)(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
do { local_irq_enable(); __UNLOCK(lock); } while (0)
@@ -48,34 +49,37 @@
#define __UNLOCK_IRQRESTORE(lock, flags) \
do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
-#define _spin_lock(lock) __LOCK(lock)
-#define _spin_lock_nested(lock, subclass) __LOCK(lock)
-#define _read_lock(lock) __LOCK(lock)
-#define _write_lock(lock) __LOCK(lock)
-#define _spin_lock_bh(lock) __LOCK_BH(lock)
-#define _read_lock_bh(lock) __LOCK_BH(lock)
-#define _write_lock_bh(lock) __LOCK_BH(lock)
-#define _spin_lock_irq(lock) __LOCK_IRQ(lock)
-#define _read_lock_irq(lock) __LOCK_IRQ(lock)
-#define _write_lock_irq(lock) __LOCK_IRQ(lock)
-#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _spin_trylock(lock) ({ __LOCK(lock); 1; })
-#define _read_trylock(lock) ({ __LOCK(lock); 1; })
-#define _write_trylock(lock) ({ __LOCK(lock); 1; })
-#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
-#define _spin_unlock(lock) __UNLOCK(lock)
-#define _read_unlock(lock) __UNLOCK(lock)
-#define _write_unlock(lock) __UNLOCK(lock)
-#define _spin_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _write_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _read_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
-#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
-#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_spin_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
+#define _raw_read_lock(lock) __LOCK(lock)
+#define _raw_write_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+#define _raw_spin_unlock(lock) __UNLOCK(lock)
+#define _raw_read_unlock(lock) __UNLOCK(lock)
+#define _raw_write_unlock(lock) __UNLOCK(lock)
+#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_spin_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
#endif /* __LINUX_SPINLOCK_API_UP_H */
Index: linux-2.6-tip/kernel/spinlock.c
===================================================================
--- linux-2.6-tip.orig/kernel/spinlock.c
+++ linux-2.6-tip/kernel/spinlock.c
@@ -44,7 +44,7 @@
* towards that other CPU that it should break the lock ASAP.
*/
#define BUILD_LOCK_OPS(op, locktype) \
-void __lockfunc __##op##_lock(locktype##_t *lock) \
+void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
{ \
for (;;) { \
preempt_disable(); \
@@ -60,7 +60,7 @@ void __lockfunc __##op##_lock(locktype##
(lock)->break_lock = 0; \
} \
\
-unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
+unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -81,12 +81,12 @@ unsigned long __lockfunc __##op##_lock_i
return flags; \
} \
\
-void __lockfunc __##op##_lock_irq(locktype##_t *lock) \
+void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
{ \
- _##op##_lock_irqsave(lock); \
+ _raw_##op##_lock_irqsave(lock); \
} \
\
-void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
+void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -95,7 +95,7 @@ void __lockfunc __##op##_lock_bh(locktyp
/* irq-disabling. We use the generic preemption-aware */ \
/* function: */ \
/**/ \
- flags = _##op##_lock_irqsave(lock); \
+ flags = _raw_##op##_lock_irqsave(lock); \
local_bh_disable(); \
local_irq_restore(flags); \
} \
@@ -116,240 +116,240 @@ BUILD_LOCK_OPS(write, rwlock);
#endif
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
-int __lockfunc _spin_trylock(raw_spinlock_t *lock)
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
- return __spin_trylock(lock);
+ return __raw_spin_trylock(lock);
}
-EXPORT_SYMBOL(_spin_trylock);
+EXPORT_SYMBOL(_raw_spin_trylock);
#endif
#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
-int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock)
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
- return __spin_trylock_bh(lock);
+ return __raw_spin_trylock_bh(lock);
}
-EXPORT_SYMBOL(_spin_trylock_bh);
+EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK
-void __lockfunc _spin_lock(raw_spinlock_t *lock)
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
- __spin_lock(lock);
+ __raw_spin_lock(lock);
}
-EXPORT_SYMBOL(_spin_lock);
+EXPORT_SYMBOL(_raw_spin_lock);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
- return __spin_lock_irqsave(lock);
+ return __raw_spin_lock_irqsave(lock);
}
-EXPORT_SYMBOL(_spin_lock_irqsave);
+EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
-void __lockfunc _spin_lock_irq(raw_spinlock_t *lock)
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
- __spin_lock_irq(lock);
+ __raw_spin_lock_irq(lock);
}
-EXPORT_SYMBOL(_spin_lock_irq);
+EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK_BH
-void __lockfunc _spin_lock_bh(raw_spinlock_t *lock)
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
- __spin_lock_bh(lock);
+ __raw_spin_lock_bh(lock);
}
-EXPORT_SYMBOL(_spin_lock_bh);
+EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif
#ifndef CONFIG_INLINE_SPIN_UNLOCK
-void __lockfunc _spin_unlock(raw_spinlock_t *lock)
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
- __spin_unlock(lock);
+ __raw_spin_unlock(lock);
}
-EXPORT_SYMBOL(_spin_unlock);
+EXPORT_SYMBOL(_raw_spin_unlock);
#endif
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-void __lockfunc _spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
+void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
- __spin_unlock_irqrestore(lock, flags);
+ __raw_spin_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(_spin_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock)
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
- __spin_unlock_irq(lock);
+ __raw_spin_unlock_irq(lock);
}
-EXPORT_SYMBOL(_spin_unlock_irq);
+EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif
#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
-void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock)
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
- __spin_unlock_bh(lock);
+ __raw_spin_unlock_bh(lock);
}
-EXPORT_SYMBOL(_spin_unlock_bh);
+EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
#ifndef CONFIG_INLINE_READ_TRYLOCK
-int __lockfunc _read_trylock(rwlock_t *lock)
+int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
- return __read_trylock(lock);
+ return __raw_read_trylock(lock);
}
-EXPORT_SYMBOL(_read_trylock);
+EXPORT_SYMBOL(_raw_read_trylock);
#endif
#ifndef CONFIG_INLINE_READ_LOCK
-void __lockfunc _read_lock(rwlock_t *lock)
+void __lockfunc _raw_read_lock(rwlock_t *lock)
{
- __read_lock(lock);
+ __raw_read_lock(lock);
}
-EXPORT_SYMBOL(_read_lock);
+EXPORT_SYMBOL(_raw_read_lock);
#endif
#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
- return __read_lock_irqsave(lock);
+ return __raw_read_lock_irqsave(lock);
}
-EXPORT_SYMBOL(_read_lock_irqsave);
+EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif
#ifndef CONFIG_INLINE_READ_LOCK_IRQ
-void __lockfunc _read_lock_irq(rwlock_t *lock)
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
- __read_lock_irq(lock);
+ __raw_read_lock_irq(lock);
}
-EXPORT_SYMBOL(_read_lock_irq);
+EXPORT_SYMBOL(_raw_read_lock_irq);
#endif
#ifndef CONFIG_INLINE_READ_LOCK_BH
-void __lockfunc _read_lock_bh(rwlock_t *lock)
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
- __read_lock_bh(lock);
+ __raw_read_lock_bh(lock);
}
-EXPORT_SYMBOL(_read_lock_bh);
+EXPORT_SYMBOL(_raw_read_lock_bh);
#endif
#ifndef CONFIG_INLINE_READ_UNLOCK
-void __lockfunc _read_unlock(rwlock_t *lock)
+void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
- __read_unlock(lock);
+ __raw_read_unlock(lock);
}
-EXPORT_SYMBOL(_read_unlock);
+EXPORT_SYMBOL(_raw_read_unlock);
#endif
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- __read_unlock_irqrestore(lock, flags);
+ __raw_read_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(_read_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
-void __lockfunc _read_unlock_irq(rwlock_t *lock)
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
- __read_unlock_irq(lock);
+ __raw_read_unlock_irq(lock);
}
-EXPORT_SYMBOL(_read_unlock_irq);
+EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif
#ifndef CONFIG_INLINE_READ_UNLOCK_BH
-void __lockfunc _read_unlock_bh(rwlock_t *lock)
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
- __read_unlock_bh(lock);
+ __raw_read_unlock_bh(lock);
}
-EXPORT_SYMBOL(_read_unlock_bh);
+EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif
#ifndef CONFIG_INLINE_WRITE_TRYLOCK
-int __lockfunc _write_trylock(rwlock_t *lock)
+int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
- return __write_trylock(lock);
+ return __raw_write_trylock(lock);
}
-EXPORT_SYMBOL(_write_trylock);
+EXPORT_SYMBOL(_raw_write_trylock);
#endif
#ifndef CONFIG_INLINE_WRITE_LOCK
-void __lockfunc _write_lock(rwlock_t *lock)
+void __lockfunc _raw_write_lock(rwlock_t *lock)
{
- __write_lock(lock);
+ __raw_write_lock(lock);
}
-EXPORT_SYMBOL(_write_lock);
+EXPORT_SYMBOL(_raw_write_lock);
#endif
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
- return __write_lock_irqsave(lock);
+ return __raw_write_lock_irqsave(lock);
}
-EXPORT_SYMBOL(_write_lock_irqsave);
+EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
-void __lockfunc _write_lock_irq(rwlock_t *lock)
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
- __write_lock_irq(lock);
+ __raw_write_lock_irq(lock);
}
-EXPORT_SYMBOL(_write_lock_irq);
+EXPORT_SYMBOL(_raw_write_lock_irq);
#endif
#ifndef CONFIG_INLINE_WRITE_LOCK_BH
-void __lockfunc _write_lock_bh(rwlock_t *lock)
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
- __write_lock_bh(lock);
+ __raw_write_lock_bh(lock);
}
-EXPORT_SYMBOL(_write_lock_bh);
+EXPORT_SYMBOL(_raw_write_lock_bh);
#endif
#ifndef CONFIG_INLINE_WRITE_UNLOCK
-void __lockfunc _write_unlock(rwlock_t *lock)
+void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
- __write_unlock(lock);
+ __raw_write_unlock(lock);
}
-EXPORT_SYMBOL(_write_unlock);
+EXPORT_SYMBOL(_raw_write_unlock);
#endif
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- __write_unlock_irqrestore(lock, flags);
+ __raw_write_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(_write_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-void __lockfunc _write_unlock_irq(rwlock_t *lock)
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
- __write_unlock_irq(lock);
+ __raw_write_unlock_irq(lock);
}
-EXPORT_SYMBOL(_write_unlock_irq);
+EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif
#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
-void __lockfunc _write_unlock_bh(rwlock_t *lock)
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
- __write_unlock_bh(lock);
+ __raw_write_unlock_bh(lock);
}
-EXPORT_SYMBOL(_write_unlock_bh);
+EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_lock_nested);
+EXPORT_SYMBOL(_raw_spin_lock_nested);
-unsigned long __lockfunc _spin_lock_irqsave_nested(raw_spinlock_t *lock,
+unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
int subclass)
{
unsigned long flags;
@@ -361,16 +361,16 @@ unsigned long __lockfunc _spin_lock_irqs
do_raw_spin_lock_flags, &flags);
return flags;
}
-EXPORT_SYMBOL(_spin_lock_irqsave_nested);
+EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
-void __lockfunc _spin_lock_nest_lock(raw_spinlock_t *lock,
+void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
struct lockdep_map *nest_lock)
{
preempt_disable();
spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_lock_nest_lock);
+EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
#endif
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 12/23] bkl: Fixup core_lock fallout
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (10 preceding siblings ...)
2009-12-06 18:02 ` [patch 11/23] locking: Cleanup the name space completely Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` [patch 13/23] plist: Make plist debugging raw_spinlock aware Thomas Gleixner
` (10 subsequent siblings)
22 siblings, 0 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: bkl-core-lock-fixup.patch --]
[-- Type: text/plain, Size: 904 bytes --]
kernel_lock.c emits a warning because a raw spinlock function is used
with a spinlock. Convert the BKL to a raw_spinlock.
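
To illustrate the mismatch (sketch only; the compiler output is paraphrased,
not taken from a build log): __lock_kernel() spins with _raw_spin_trylock(),
which after the name space cleanup expects a raw_spinlock_t *, so the old
spinlock_t declaration of kernel_flag no longer type-checks:

/* before: sleeping-capable lock type handed to the raw trylock API */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
	...
	} while (!_raw_spin_trylock(&kernel_flag));	/* type mismatch warning */

/* after: the BKL itself becomes a raw lock, matching the raw accessors */
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
	...
	} while (!_raw_spin_trylock(&kernel_flag));	/* types agree */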
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
lib/kernel_lock.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
Index: linux-2.6-tip/lib/kernel_lock.c
===================================================================
--- linux-2.6-tip.orig/lib/kernel_lock.c
+++ linux-2.6-tip/lib/kernel_lock.c
@@ -23,7 +23,7 @@
*
* Don't use in new code.
*/
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
/*
@@ -82,7 +82,7 @@ static inline void __lock_kernel(void)
*/
do {
preempt_enable();
- while (spin_is_locked(&kernel_flag))
+ while (raw_spin_is_locked(&kernel_flag))
cpu_relax();
preempt_disable();
} while (!_raw_spin_trylock(&kernel_flag));
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 13/23] plist: Make plist debugging raw_spinlock aware
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (11 preceding siblings ...)
2009-12-06 18:02 ` [patch 12/23] bkl: Fixup core_lock fallout Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` Thomas Gleixner
2009-12-07 1:21 ` Frederic Weisbecker
2009-12-06 18:03 ` [patch 14/23] sched: Convert rq->lock to raw_spinlock Thomas Gleixner
` (9 subsequent siblings)
22 siblings, 2 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: plist-make-core-lock-aware.patch --]
[-- Type: text/plain, Size: 4593 bytes --]
plists are used with spinlocks and raw_spinlocks. Change the plist
debugging to handle both types.
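
Usage sketch (the lock and list names are made up for illustration; only the
initializers from this series are assumed): a plist protected by a
raw_spinlock_t is set up with the new plist_head_init_raw(), while spinlock_t
users keep plist_head_init():

static DEFINE_RAW_SPINLOCK(pi_lock);		/* raw lock, e.g. rtmutex path */
static struct plist_head pi_waiters;

static DEFINE_SPINLOCK(hb_lock);		/* sleeping-capable spinlock */
static struct plist_head hb_chain;

static void __init init_plists(void)
{
	/* debug code will check raw_spin_is_locked(&pi_lock) */
	plist_head_init_raw(&pi_waiters, &pi_lock);
	/* debug code will check spin_is_locked(&hb_lock) */
	plist_head_init(&hb_chain, &hb_lock);
}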
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/plist.h | 43 +++++++++++++++++++++++++++++++++++++------
kernel/futex.c | 6 +++---
lib/plist.c | 8 +++++---
3 files changed, 45 insertions(+), 12 deletions(-)
Index: linux-2.6-tip/include/linux/plist.h
===================================================================
--- linux-2.6-tip.orig/include/linux/plist.h
+++ linux-2.6-tip/include/linux/plist.h
@@ -81,7 +81,8 @@ struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
- spinlock_t *lock;
+ raw_spinlock_t *rawlock;
+ spinlock_t *spinlock;
#endif
};
@@ -91,9 +92,11 @@ struct plist_node {
};
#ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
+# define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock
#else
# define PLIST_HEAD_LOCK_INIT(_lock)
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
#endif
#define _PLIST_HEAD_INIT(head) \
@@ -107,11 +110,22 @@ struct plist_node {
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
- _PLIST_HEAD_INIT(head), \
+ _PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}
/**
+ * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
+ * @head: struct plist_head variable name
+ * @_lock: lock to initialize for this list
+ */
+#define PLIST_HEAD_INIT_RAW(head, _lock) \
+{ \
+ _PLIST_HEAD_INIT(head), \
+ PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \
+}
+
+/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
* @__prio: initial node priority
@@ -119,13 +133,13 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
- .plist = { _PLIST_HEAD_INIT((node).plist) }, \
+ .plist = { _PLIST_HEAD_INIT((node).plist) }, \
}
/**
* plist_head_init - dynamic struct plist_head initializer
* @head: &struct plist_head pointer
- * @lock: list spinlock, remembered for debugging
+ * @lock: spinlock protecting the list (debugging)
*/
static inline void
plist_head_init(struct plist_head *head, spinlock_t *lock)
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head,
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST
- head->lock = lock;
+ head->spinlock = lock;
+ head->rawlock = NULL;
+#endif
+}
+
+/**
+ * plist_head_init_raw - dynamic struct plist_head initializer
+ * @head: &struct plist_head pointer
+ * @lock: raw_spinlock protecting the list (debugging)
+ */
+static inline void
+plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
+{
+ INIT_LIST_HEAD(&head->prio_list);
+ INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+ head->rawlock = lock;
+ head->spinlock = NULL;
#endif
}
Index: linux-2.6-tip/kernel/futex.c
===================================================================
--- linux-2.6-tip.orig/kernel/futex.c
+++ linux-2.6-tip/kernel/futex.c
@@ -1004,7 +1004,7 @@ void requeue_futex(struct futex_q *q, st
plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb2->lock;
+ q->list.plist.spinlock = &hb2->lock;
#endif
}
get_futex_key_refs(key2);
@@ -1040,7 +1040,7 @@ void requeue_pi_wake_futex(struct futex_
q->lock_ptr = &hb->lock;
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb->lock;
+ q->list.plist.spinlock = &hb->lock;
#endif
wake_up_state(q->task, TASK_NORMAL);
@@ -1388,7 +1388,7 @@ static inline void queue_me(struct futex
plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb->lock;
+ q->list.plist.spinlock = &hb->lock;
#endif
plist_add(&q->list, &hb->chain);
q->task = current;
Index: linux-2.6-tip/lib/plist.c
===================================================================
--- linux-2.6-tip.orig/lib/plist.c
+++ linux-2.6-tip/lib/plist.c
@@ -54,9 +54,11 @@ static void plist_check_list(struct list
static void plist_check_head(struct plist_head *head)
{
- WARN_ON(!head->lock);
- if (head->lock)
- WARN_ON_SMP(!spin_is_locked(head->lock));
+ WARN_ON(!head->rawlock && !head->spinlock);
+ if (head->rawlock)
+ WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
+ if (head->spinlock)
+ WARN_ON_SMP(!spin_is_locked(head->spinlock));
plist_check_list(&head->prio_list);
plist_check_list(&head->node_list);
}
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 13/23] plist: Make plist debugging raw_spinlock aware
2009-12-06 18:02 ` [patch 13/23] plist: Make plist debugging raw_spinlock aware Thomas Gleixner
@ 2009-12-06 18:02 ` Thomas Gleixner
2009-12-07 1:21 ` Frederic Weisbecker
1 sibling, 0 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:02 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: plist-make-core-lock-aware.patch --]
[-- Type: text/plain, Size: 4595 bytes --]
plists are used with spinlocks and raw_spinlocks. Change the plist
debugging to handle both types.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/plist.h | 43 +++++++++++++++++++++++++++++++++++++------
kernel/futex.c | 6 +++---
lib/plist.c | 8 +++++---
3 files changed, 45 insertions(+), 12 deletions(-)
Index: linux-2.6-tip/include/linux/plist.h
===================================================================
--- linux-2.6-tip.orig/include/linux/plist.h
+++ linux-2.6-tip/include/linux/plist.h
@@ -81,7 +81,8 @@ struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
- spinlock_t *lock;
+ raw_spinlock_t *rawlock;
+ spinlock_t *spinlock;
#endif
};
@@ -91,9 +92,11 @@ struct plist_node {
};
#ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
+# define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock
#else
# define PLIST_HEAD_LOCK_INIT(_lock)
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
#endif
#define _PLIST_HEAD_INIT(head) \
@@ -107,11 +110,22 @@ struct plist_node {
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
- _PLIST_HEAD_INIT(head), \
+ _PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}
/**
+ * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
+ * @head: struct plist_head variable name
+ * @_lock: lock to initialize for this list
+ */
+#define PLIST_HEAD_INIT_RAW(head, _lock) \
+{ \
+ _PLIST_HEAD_INIT(head), \
+ PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \
+}
+
+/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
* @__prio: initial node priority
@@ -119,13 +133,13 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
- .plist = { _PLIST_HEAD_INIT((node).plist) }, \
+ .plist = { _PLIST_HEAD_INIT((node).plist) }, \
}
/**
* plist_head_init - dynamic struct plist_head initializer
* @head: &struct plist_head pointer
- * @lock: list spinlock, remembered for debugging
+ * @lock: spinlock protecting the list (debugging)
*/
static inline void
plist_head_init(struct plist_head *head, spinlock_t *lock)
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head,
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST
- head->lock = lock;
+ head->spinlock = lock;
+ head->rawlock = NULL;
+#endif
+}
+
+/**
+ * plist_head_init_raw - dynamic struct plist_head initializer
+ * @head: &struct plist_head pointer
+ * @lock: raw_spinlock protecting the list (debugging)
+ */
+static inline void
+plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
+{
+ INIT_LIST_HEAD(&head->prio_list);
+ INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+ head->rawlock = lock;
+ head->spinlock = NULL;
#endif
}
Index: linux-2.6-tip/kernel/futex.c
===================================================================
--- linux-2.6-tip.orig/kernel/futex.c
+++ linux-2.6-tip/kernel/futex.c
@@ -1004,7 +1004,7 @@ void requeue_futex(struct futex_q *q, st
plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb2->lock;
+ q->list.plist.spinlock = &hb2->lock;
#endif
}
get_futex_key_refs(key2);
@@ -1040,7 +1040,7 @@ void requeue_pi_wake_futex(struct futex_
q->lock_ptr = &hb->lock;
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb->lock;
+ q->list.plist.spinlock = &hb->lock;
#endif
wake_up_state(q->task, TASK_NORMAL);
@@ -1388,7 +1388,7 @@ static inline void queue_me(struct futex
plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb->lock;
+ q->list.plist.spinlock = &hb->lock;
#endif
plist_add(&q->list, &hb->chain);
q->task = current;
Index: linux-2.6-tip/lib/plist.c
===================================================================
--- linux-2.6-tip.orig/lib/plist.c
+++ linux-2.6-tip/lib/plist.c
@@ -54,9 +54,11 @@ static void plist_check_list(struct list
static void plist_check_head(struct plist_head *head)
{
- WARN_ON(!head->lock);
- if (head->lock)
- WARN_ON_SMP(!spin_is_locked(head->lock));
+ WARN_ON(!head->rawlock && !head->spinlock);
+ if (head->rawlock)
+ WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
+ if (head->spinlock)
+ WARN_ON_SMP(!spin_is_locked(head->spinlock));
plist_check_list(&head->prio_list);
plist_check_list(&head->node_list);
}
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 14/23] sched: Convert rq->lock to raw_spinlock
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (12 preceding siblings ...)
2009-12-06 18:02 ` [patch 13/23] plist: Make plist debugging raw_spinlock aware Thomas Gleixner
@ 2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 15/23] sched: Convert rt_runtime_lock " Thomas Gleixner
` (8 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:03 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: sched-convert-rq-lock-to-core-lock.patch --]
[-- Type: text/plain, Size: 21371 bytes --]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/sched.c | 183 ++++++++++++++++++++++++------------------------
kernel/sched_debug.c | 4 -
kernel/sched_idletask.c | 4 -
kernel/sched_rt.c | 14 +--
4 files changed, 104 insertions(+), 101 deletions(-)
Index: linux-2.6-tip/kernel/sched.c
===================================================================
--- linux-2.6-tip.orig/kernel/sched.c
+++ linux-2.6-tip/kernel/sched.c
@@ -525,7 +525,7 @@ static struct root_domain def_root_domai
*/
struct rq {
/* runqueue lock: */
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* nr_running and cpu_load should be in the same cacheline because
@@ -685,7 +685,7 @@ inline void update_rq_clock(struct rq *r
*/
int runqueue_is_locked(int cpu)
{
- return spin_is_locked(&cpu_rq(cpu)->lock);
+ return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}
/*
@@ -883,7 +883,7 @@ static inline void finish_lock_switch(st
{
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
- rq->lock.rlock.owner = current;
+ rq->lock.owner = current;
#endif
/*
* If we are tracking spinlock dependencies then we have to
@@ -892,7 +892,7 @@ static inline void finish_lock_switch(st
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
}
#else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -916,9 +916,9 @@ static inline void prepare_lock_switch(s
next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
#else
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
#endif
}
@@ -948,10 +948,10 @@ static inline struct rq *__task_rq_lock(
{
for (;;) {
struct rq *rq = task_rq(p);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
}
@@ -968,10 +968,10 @@ static struct rq *task_rq_lock(struct ta
for (;;) {
local_irq_save(*flags);
rq = task_rq(p);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
- spin_unlock_irqrestore(&rq->lock, *flags);
+ raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
}
@@ -980,19 +980,19 @@ void task_rq_unlock_wait(struct task_str
struct rq *rq = task_rq(p);
smp_mb(); /* spin-unlock-wait is not a full memory barrier */
- spin_unlock_wait(&rq->lock);
+ raw_spin_unlock_wait(&rq->lock);
}
static void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
__releases(rq->lock)
{
- spin_unlock_irqrestore(&rq->lock, *flags);
+ raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
/*
@@ -1005,7 +1005,7 @@ static struct rq *this_rq_lock(void)
local_irq_disable();
rq = this_rq();
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
return rq;
}
@@ -1052,10 +1052,10 @@ static enum hrtimer_restart hrtick(struc
WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
update_rq_clock(rq);
rq->curr->sched_class->task_tick(rq, rq->curr, 1);
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
return HRTIMER_NORESTART;
}
@@ -1068,10 +1068,10 @@ static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
hrtimer_restart(&rq->hrtick_timer);
rq->hrtick_csd_pending = 0;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
/*
@@ -1178,7 +1178,7 @@ static void resched_task(struct task_str
{
int cpu;
- assert_spin_locked(&task_rq(p)->lock);
+ assert_raw_spin_locked(&task_rq(p)->lock);
if (test_tsk_need_resched(p))
return;
@@ -1200,10 +1200,10 @@ static void resched_cpu(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- if (!spin_trylock_irqsave(&rq->lock, flags))
+ if (!raw_spin_trylock_irqsave(&rq->lock, flags))
return;
resched_task(cpu_curr(cpu));
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#ifdef CONFIG_NO_HZ
@@ -1272,7 +1272,7 @@ static void sched_rt_avg_update(struct r
#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
- assert_spin_locked(&task_rq(p)->lock);
+ assert_raw_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}
@@ -1599,11 +1599,11 @@ static void update_group_shares_cpu(stru
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
__set_se_shares(tg->se[cpu], shares);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
}
@@ -1701,9 +1701,9 @@ static void update_shares_locked(struct
if (root_task_group_empty())
return;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
update_shares(sd);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
}
static void update_h_load(long cpu)
@@ -1743,7 +1743,7 @@ static inline int _double_lock_balance(s
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
- spin_unlock(&this_rq->lock);
+ raw_spin_unlock(&this_rq->lock);
double_rq_lock(this_rq, busiest);
return 1;
@@ -1764,14 +1764,16 @@ static int _double_lock_balance(struct r
{
int ret = 0;
- if (unlikely(!spin_trylock(&busiest->lock))) {
+ if (unlikely(!raw_spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
- spin_unlock(&this_rq->lock);
- spin_lock(&busiest->lock);
- spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_unlock(&this_rq->lock);
+ raw_spin_lock(&busiest->lock);
+ raw_spin_lock_nested(&this_rq->lock,
+ SINGLE_DEPTH_NESTING);
ret = 1;
} else
- spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock_nested(&busiest->lock,
+ SINGLE_DEPTH_NESTING);
}
return ret;
}
@@ -1785,7 +1787,7 @@ static int double_lock_balance(struct rq
{
if (unlikely(!irqs_disabled())) {
/* printk() doesn't work good under rq->lock */
- spin_unlock(&this_rq->lock);
+ raw_spin_unlock(&this_rq->lock);
BUG_ON(1);
}
@@ -1795,7 +1797,7 @@ static int double_lock_balance(struct rq
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
- spin_unlock(&busiest->lock);
+ raw_spin_unlock(&busiest->lock);
lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
#endif
@@ -2016,13 +2018,13 @@ void kthread_bind(struct task_struct *p,
return;
}
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
update_rq_clock(rq);
set_task_cpu(p, cpu);
p->cpus_allowed = cpumask_of_cpu(cpu);
p->rt.nr_cpus_allowed = 1;
p->flags |= PF_THREAD_BOUND;
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
EXPORT_SYMBOL(kthread_bind);
@@ -2798,10 +2800,10 @@ static inline void post_schedule(struct
if (rq->post_schedule) {
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->curr->sched_class->post_schedule)
rq->curr->sched_class->post_schedule(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
rq->post_schedule = 0;
}
@@ -3083,15 +3085,15 @@ static void double_rq_lock(struct rq *rq
{
BUG_ON(!irqs_disabled());
if (rq1 == rq2) {
- spin_lock(&rq1->lock);
+ raw_spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
} else {
if (rq1 < rq2) {
- spin_lock(&rq1->lock);
- spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&rq1->lock);
+ raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
} else {
- spin_lock(&rq2->lock);
- spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&rq2->lock);
+ raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
}
}
update_rq_clock(rq1);
@@ -3108,9 +3110,9 @@ static void double_rq_unlock(struct rq *
__releases(rq1->lock)
__releases(rq2->lock)
{
- spin_unlock(&rq1->lock);
+ raw_spin_unlock(&rq1->lock);
if (rq1 != rq2)
- spin_unlock(&rq2->lock);
+ raw_spin_unlock(&rq2->lock);
else
__release(rq2->lock);
}
@@ -4207,14 +4209,15 @@ redo:
if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
- spin_lock_irqsave(&busiest->lock, flags);
+ raw_spin_lock_irqsave(&busiest->lock, flags);
/* don't kick the migration_thread, if the curr
* task on busiest cpu can't be moved to this_cpu
*/
if (!cpumask_test_cpu(this_cpu,
&busiest->curr->cpus_allowed)) {
- spin_unlock_irqrestore(&busiest->lock, flags);
+ raw_spin_unlock_irqrestore(&busiest->lock,
+ flags);
all_pinned = 1;
goto out_one_pinned;
}
@@ -4224,7 +4227,7 @@ redo:
busiest->push_cpu = this_cpu;
active_balance = 1;
}
- spin_unlock_irqrestore(&busiest->lock, flags);
+ raw_spin_unlock_irqrestore(&busiest->lock, flags);
if (active_balance)
wake_up_process(busiest->migration_thread);
@@ -4406,10 +4409,10 @@ redo:
/*
* Should not call ttwu while holding a rq->lock
*/
- spin_unlock(&this_rq->lock);
+ raw_spin_unlock(&this_rq->lock);
if (active_balance)
wake_up_process(busiest->migration_thread);
- spin_lock(&this_rq->lock);
+ raw_spin_lock(&this_rq->lock);
} else
sd->nr_balance_failed = 0;
@@ -5278,11 +5281,11 @@ void scheduler_tick(void)
sched_clock_tick();
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
update_rq_clock(rq);
update_cpu_load(rq);
curr->sched_class->task_tick(rq, curr, 0);
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
perf_event_task_tick(curr, cpu);
@@ -5478,7 +5481,7 @@ need_resched_nonpreemptible:
if (sched_feat(HRTICK))
hrtick_clear(rq);
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
clear_tsk_need_resched(prev);
@@ -5514,7 +5517,7 @@ need_resched_nonpreemptible:
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
post_schedule(rq);
@@ -6995,7 +6998,7 @@ void __cpuinit init_idle(struct task_str
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__sched_fork(idle);
idle->se.exec_start = sched_clock();
@@ -7008,7 +7011,7 @@ void __cpuinit init_idle(struct task_str
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
idle->oncpu = 1;
#endif
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT)
@@ -7204,10 +7207,10 @@ static int migration_thread(void *data)
struct migration_req *req;
struct list_head *head;
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
if (cpu_is_offline(cpu)) {
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
break;
}
@@ -7219,7 +7222,7 @@ static int migration_thread(void *data)
head = &rq->migration_queue;
if (list_empty(head)) {
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
continue;
@@ -7228,14 +7231,14 @@ static int migration_thread(void *data)
list_del_init(head->next);
if (req->task != NULL) {
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
__migrate_task(req->task, cpu, req->dest_cpu);
} else if (likely(cpu == (badcpu = smp_processor_id()))) {
req->dest_cpu = RCU_MIGRATION_GOT_QS;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
} else {
req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
}
local_irq_enable();
@@ -7358,14 +7361,14 @@ void sched_idle_next(void)
* Strictly not necessary since rest of the CPUs are stopped by now
* and interrupts disabled on the current cpu.
*/
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
update_rq_clock(rq);
activate_task(rq, p, 0);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
@@ -7401,9 +7404,9 @@ static void migrate_dead(unsigned int de
* that's OK. No task can be added to this CPU, so iteration is
* fine.
*/
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p);
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
put_task_struct(p);
}
@@ -7670,13 +7673,13 @@ migration_call(struct notifier_block *nf
/* Update our root-domain */
rq = cpu_rq(cpu);
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_online(rq);
}
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#ifdef CONFIG_HOTPLUG_CPU
@@ -7701,14 +7704,14 @@ migration_call(struct notifier_block *nf
put_task_struct(rq->migration_thread);
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
deactivate_task(rq, rq->idle, 0);
rq->idle->static_prio = MAX_PRIO;
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
cpuset_unlock();
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
@@ -7718,30 +7721,30 @@ migration_call(struct notifier_block *nf
* they didn't take sched_hotcpu_mutex. Just wake up
* the requestors.
*/
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
while (!list_empty(&rq->migration_queue)) {
struct migration_req *req;
req = list_entry(rq->migration_queue.next,
struct migration_req, list);
list_del_init(&req->list);
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
complete(&req->done);
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
}
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
/* Update our root-domain */
rq = cpu_rq(cpu);
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#endif
}
@@ -7971,7 +7974,7 @@ static void rq_attach_root(struct rq *rq
struct root_domain *old_rd = NULL;
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
old_rd = rq->rd;
@@ -7997,7 +8000,7 @@ static void rq_attach_root(struct rq *rq
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
if (old_rd)
free_rootdomain(old_rd);
@@ -9352,7 +9355,7 @@ static void init_rt_rq(struct rt_rq *rt_
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
- plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
+ plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
#endif
rt_rq->rt_time = 0;
@@ -9518,7 +9521,7 @@ void __init sched_init(void)
struct rq *rq;
rq = cpu_rq(i);
- spin_lock_init(&rq->lock);
+ raw_spin_lock_init(&rq->lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -10106,9 +10109,9 @@ static void set_se_shares(struct sched_e
struct rq *rq = cfs_rq->rq;
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__set_se_shares(se, shares);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static DEFINE_MUTEX(shares_mutex);
@@ -10708,9 +10711,9 @@ static u64 cpuacct_cpuusage_read(struct
/*
* Take rq->lock to make 64-bit read safe on 32-bit platforms.
*/
- spin_lock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_lock_irq(&cpu_rq(cpu)->lock);
data = *cpuusage;
- spin_unlock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
data = *cpuusage;
#endif
@@ -10726,9 +10729,9 @@ static void cpuacct_cpuusage_write(struc
/*
* Take rq->lock to make 64-bit write safe on 32-bit platforms.
*/
- spin_lock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_lock_irq(&cpu_rq(cpu)->lock);
*cpuusage = val;
- spin_unlock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
*cpuusage = val;
#endif
@@ -10962,9 +10965,9 @@ void synchronize_sched_expedited(void)
init_completion(&req->done);
req->task = NULL;
req->dest_cpu = RCU_MIGRATION_NEED_QS;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
list_add(&req->list, &rq->migration_queue);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
wake_up_process(rq->migration_thread);
}
for_each_online_cpu(cpu) {
@@ -10972,11 +10975,11 @@ void synchronize_sched_expedited(void)
req = &per_cpu(rcu_migration_req, cpu);
rq = cpu_rq(cpu);
wait_for_completion(&req->done);
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
need_full_sync = 1;
req->dest_cpu = RCU_MIGRATION_IDLE;
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
synchronize_sched_expedited_count++;
Index: linux-2.6-tip/kernel/sched_debug.c
===================================================================
--- linux-2.6-tip.orig/kernel/sched_debug.c
+++ linux-2.6-tip/kernel/sched_debug.c
@@ -184,7 +184,7 @@ void print_cfs_rq(struct seq_file *m, in
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
SPLIT_NS(cfs_rq->exec_clock));
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (cfs_rq->rb_leftmost)
MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
last = __pick_last_entity(cfs_rq);
@@ -192,7 +192,7 @@ void print_cfs_rq(struct seq_file *m, in
max_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
SPLIT_NS(MIN_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
Index: linux-2.6-tip/kernel/sched_idletask.c
===================================================================
--- linux-2.6-tip.orig/kernel/sched_idletask.c
+++ linux-2.6-tip/kernel/sched_idletask.c
@@ -34,10 +34,10 @@ static struct task_struct *pick_next_tas
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
dump_stack();
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
}
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
Index: linux-2.6-tip/kernel/sched_rt.c
===================================================================
--- linux-2.6-tip.orig/kernel/sched_rt.c
+++ linux-2.6-tip/kernel/sched_rt.c
@@ -454,9 +454,9 @@ static void disable_runtime(struct rq *r
{
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__disable_runtime(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static void __enable_runtime(struct rq *rq)
@@ -486,9 +486,9 @@ static void enable_runtime(struct rq *rq
{
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__enable_runtime(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static int balance_runtime(struct rt_rq *rt_rq)
@@ -524,7 +524,7 @@ static int do_sched_rt_period_timer(stru
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
struct rq *rq = rq_of_rt_rq(rt_rq);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
if (rt_rq->rt_time) {
u64 runtime;
@@ -545,7 +545,7 @@ static int do_sched_rt_period_timer(stru
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
return idle;
@@ -1246,7 +1246,7 @@ static struct rq *find_lock_lowest_rq(st
task_running(rq, task) ||
!task->se.on_rq)) {
- spin_unlock(&lowest_rq->lock);
+ raw_spin_unlock(&lowest_rq->lock);
lowest_rq = NULL;
break;
}
^ permalink raw reply [flat|nested] 42+ messages in thread
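The hunks above are mechanical: struct rq's lock changes type from spinlock_t to raw_spinlock_t and every call site moves to the matching raw_spin_* API, which keeps spinning on PREEMPT_RT instead of turning into a sleeping lock. A minimal sketch of that call-site pattern, using made-up my_* names rather than the real scheduler structures, could look like this:

/*
 * Illustrative sketch only (not part of the patch): shows the
 * spinlock_t -> raw_spinlock_t conversion pattern applied above.
 * "my_rq", "my_rq_init" and "my_rq_tick" are invented names.
 */
#include <linux/spinlock.h>

struct my_rq {
	raw_spinlock_t	lock;		/* was: spinlock_t lock; */
	unsigned long	nr_running;
};

static void my_rq_init(struct my_rq *rq)
{
	raw_spin_lock_init(&rq->lock);	/* was: spin_lock_init() */
	rq->nr_running = 0;
}

static void my_rq_tick(struct my_rq *rq)
{
	unsigned long flags;

	/* was: spin_lock_irqsave() / spin_unlock_irqrestore() */
	raw_spin_lock_irqsave(&rq->lock, flags);
	rq->nr_running++;		/* work done under the lock */
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

The runqueue lock has to stay a spinning lock on -rt because the scheduler itself cannot sleep while holding it; the raw_spin_* variants preserve that even when ordinary spinlocks become sleeping locks.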
* [patch 15/23] sched: Convert rt_runtime_lock to raw_spinlock
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (13 preceding siblings ...)
2009-12-06 18:03 ` [patch 14/23] sched: Convert rq->lock to raw_spinlock Thomas Gleixner
@ 2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 16/23] sched: Convert cpupri lock " Thomas Gleixner
` (7 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:03 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: sched-convert-rt-runtime-lock.patch --]
[-- Type: text/plain, Size: 8435 bytes --]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/sched.c | 28 ++++++++++++++--------------
kernel/sched_rt.c | 46 +++++++++++++++++++++++-----------------------
2 files changed, 37 insertions(+), 37 deletions(-)
Index: linux-2.6-tip/kernel/sched.c
===================================================================
--- linux-2.6-tip.orig/kernel/sched.c
+++ linux-2.6-tip/kernel/sched.c
@@ -141,7 +141,7 @@ struct rt_prio_array {
struct rt_bandwidth {
/* nests inside the rq lock: */
- spinlock_t rt_runtime_lock;
+ raw_spinlock_t rt_runtime_lock;
ktime_t rt_period;
u64 rt_runtime;
struct hrtimer rt_period_timer;
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwid
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
- spin_lock_init(&rt_b->rt_runtime_lock);
+ raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt
if (hrtimer_active(&rt_b->rt_period_timer))
return;
- spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
unsigned long delta;
ktime_t soft, hard;
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt
__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
HRTIMER_MODE_ABS_PINNED, 0);
}
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED
@@ -470,7 +470,7 @@ struct rt_rq {
u64 rt_time;
u64 rt_runtime;
/* Nests inside the rq lock: */
- spinlock_t rt_runtime_lock;
+ raw_spinlock_t rt_runtime_lock;
#ifdef CONFIG_RT_GROUP_SCHED
unsigned long rt_nr_boosted;
@@ -9361,7 +9361,7 @@ static void init_rt_rq(struct rt_rq *rt_
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
- spin_lock_init(&rt_rq->rt_runtime_lock);
+ raw_spin_lock_init(&rt_rq->rt_runtime_lock);
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq->rt_nr_boosted = 0;
@@ -10296,18 +10296,18 @@ static int tg_set_bandwidth(struct task_
if (err)
goto unlock;
- spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+ raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = tg->rt_rq[i];
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_runtime;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
- spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+ raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
@@ -10412,15 +10412,15 @@ static int sched_rt_global_constraints(v
if (sysctl_sched_rt_runtime == 0)
return -EBUSY;
- spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+ raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = global_rt_runtime();
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
- spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+ raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
return 0;
}
Index: linux-2.6-tip/kernel/sched_rt.c
===================================================================
--- linux-2.6-tip.orig/kernel/sched_rt.c
+++ linux-2.6-tip/kernel/sched_rt.c
@@ -327,7 +327,7 @@ static int do_balance_runtime(struct rt_
weight = cpumask_weight(rd->span);
- spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
rt_period = ktime_to_ns(rt_b->rt_period);
for_each_cpu(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -336,7 +336,7 @@ static int do_balance_runtime(struct rt_
if (iter == rt_rq)
continue;
- spin_lock(&iter->rt_runtime_lock);
+ raw_spin_lock(&iter->rt_runtime_lock);
/*
* Either all rqs have inf runtime and there's nothing to steal
* or __disable_runtime() below sets a specific rq to inf to
@@ -358,14 +358,14 @@ static int do_balance_runtime(struct rt_
rt_rq->rt_runtime += diff;
more = 1;
if (rt_rq->rt_runtime == rt_period) {
- spin_unlock(&iter->rt_runtime_lock);
+ raw_spin_unlock(&iter->rt_runtime_lock);
break;
}
}
next:
- spin_unlock(&iter->rt_runtime_lock);
+ raw_spin_unlock(&iter->rt_runtime_lock);
}
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
return more;
}
@@ -386,8 +386,8 @@ static void __disable_runtime(struct rq
s64 want;
int i;
- spin_lock(&rt_b->rt_runtime_lock);
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* Either we're all inf and nobody needs to borrow, or we're
* already disabled and thus have nothing to do, or we have
@@ -396,7 +396,7 @@ static void __disable_runtime(struct rq
if (rt_rq->rt_runtime == RUNTIME_INF ||
rt_rq->rt_runtime == rt_b->rt_runtime)
goto balanced;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
/*
* Calculate the difference between what we started out with
@@ -418,7 +418,7 @@ static void __disable_runtime(struct rq
if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
continue;
- spin_lock(&iter->rt_runtime_lock);
+ raw_spin_lock(&iter->rt_runtime_lock);
if (want > 0) {
diff = min_t(s64, iter->rt_runtime, want);
iter->rt_runtime -= diff;
@@ -427,13 +427,13 @@ static void __disable_runtime(struct rq
iter->rt_runtime -= want;
want -= want;
}
- spin_unlock(&iter->rt_runtime_lock);
+ raw_spin_unlock(&iter->rt_runtime_lock);
if (!want)
break;
}
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* We cannot be left wanting - that would mean some runtime
* leaked out of the system.
@@ -445,8 +445,8 @@ balanced:
* runtime - in which case borrowing doesn't make sense.
*/
rt_rq->rt_runtime = RUNTIME_INF;
- spin_unlock(&rt_rq->rt_runtime_lock);
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
}
}
@@ -472,13 +472,13 @@ static void __enable_runtime(struct rq *
for_each_leaf_rt_rq(rt_rq, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
- spin_lock(&rt_b->rt_runtime_lock);
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_b->rt_runtime;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
- spin_unlock(&rt_rq->rt_runtime_lock);
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
}
}
@@ -496,9 +496,9 @@ static int balance_runtime(struct rt_rq
int more = 0;
if (rt_rq->rt_time > rt_rq->rt_runtime) {
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
more = do_balance_runtime(rt_rq);
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
}
return more;
@@ -528,7 +528,7 @@ static int do_sched_rt_period_timer(stru
if (rt_rq->rt_time) {
u64 runtime;
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
if (rt_rq->rt_throttled)
balance_runtime(rt_rq);
runtime = rt_rq->rt_runtime;
@@ -539,7 +539,7 @@ static int do_sched_rt_period_timer(stru
}
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
} else if (rt_rq->rt_nr_running)
idle = 0;
@@ -624,11 +624,11 @@ static void update_curr_rt(struct rq *rq
rt_rq = rt_rq_of_se(rt_se);
if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_time += delta_exec;
if (sched_rt_runtime_exceeded(rt_rq))
resched_task(curr);
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
}
}
^ permalink raw reply [flat|nested] 42+ messages in thread
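Both rt_runtime_lock instances are annotated in the patch as nesting inside the already-converted rq->lock, so the inner lock can be taken with a plain raw_spin_lock() while interrupts are disabled by the outer irqsave section. A rough sketch of that nesting, again with illustrative my_* names rather than the real structures, might be:

/*
 * Illustrative sketch only (not part of the patch): the bandwidth lock
 * is only taken while the raw rq lock is already held, so it must not
 * become a sleeping lock on -rt either.  Names are made up.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_rt_rq {
	raw_spinlock_t	rt_runtime_lock;	/* inner lock, was spinlock_t */
	u64		rt_time;
};

/* rq_lock stands in for the raw rq->lock converted in the previous patch. */
static void my_charge_rt_time(raw_spinlock_t *rq_lock, struct my_rt_rq *rt_rq,
			      u64 delta_exec)
{
	unsigned long flags;

	raw_spin_lock_irqsave(rq_lock, flags);	/* outer lock, irqs off */
	raw_spin_lock(&rt_rq->rt_runtime_lock);	/* nests inside the rq lock */
	rt_rq->rt_time += delta_exec;
	raw_spin_unlock(&rt_rq->rt_runtime_lock);
	raw_spin_unlock_irqrestore(rq_lock, flags);
}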
* [patch 16/23] sched: Convert cpupri lock to raw_spinlock
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (14 preceding siblings ...)
2009-12-06 18:03 ` [patch 15/23] sched: Convert rt_runtime_lock " Thomas Gleixner
@ 2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 17/23] sched: Convert pi_lock " Thomas Gleixner
` (6 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:03 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: sched-convert-cpupri-lock-to-core-lock.patch --]
[-- Type: text/plain, Size: 1940 bytes --]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/sched_cpupri.c | 10 +++++-----
kernel/sched_cpupri.h | 2 +-
2 files changed, 6 insertions(+), 6 deletions(-)
Index: linux-2.6-tip/kernel/sched_cpupri.c
===================================================================
--- linux-2.6-tip.orig/kernel/sched_cpupri.c
+++ linux-2.6-tip/kernel/sched_cpupri.c
@@ -135,26 +135,26 @@ void cpupri_set(struct cpupri *cp, int c
if (likely(newpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
- spin_lock_irqsave(&vec->lock, flags);
+ raw_spin_lock_irqsave(&vec->lock, flags);
cpumask_set_cpu(cpu, vec->mask);
vec->count++;
if (vec->count == 1)
set_bit(newpri, cp->pri_active);
- spin_unlock_irqrestore(&vec->lock, flags);
+ raw_spin_unlock_irqrestore(&vec->lock, flags);
}
if (likely(oldpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
- spin_lock_irqsave(&vec->lock, flags);
+ raw_spin_lock_irqsave(&vec->lock, flags);
vec->count--;
if (!vec->count)
clear_bit(oldpri, cp->pri_active);
cpumask_clear_cpu(cpu, vec->mask);
- spin_unlock_irqrestore(&vec->lock, flags);
+ raw_spin_unlock_irqrestore(&vec->lock, flags);
}
*currpri = newpri;
@@ -180,7 +180,7 @@ int cpupri_init(struct cpupri *cp, bool
for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
struct cpupri_vec *vec = &cp->pri_to_cpu[i];
- spin_lock_init(&vec->lock);
+ raw_spin_lock_init(&vec->lock);
vec->count = 0;
if (!zalloc_cpumask_var(&vec->mask, gfp))
goto cleanup;
Index: linux-2.6-tip/kernel/sched_cpupri.h
===================================================================
--- linux-2.6-tip.orig/kernel/sched_cpupri.h
+++ linux-2.6-tip/kernel/sched_cpupri.h
@@ -12,7 +12,7 @@
/* values 2-101 are RT priorities 0-99 */
struct cpupri_vec {
- spinlock_t lock;
+ raw_spinlock_t lock;
int count;
cpumask_var_t mask;
};
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 17/23] sched: Convert pi_lock to raw_spinlock
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (15 preceding siblings ...)
2009-12-06 18:03 ` [patch 16/23] sched: Convert cpupri lock " Thomas Gleixner
@ 2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 18/23] rtmutex: Convert rtmutex.lock " Thomas Gleixner
` (5 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:03 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: sched-convert-pi-lock-to-core-lock.patch --]
[-- Type: text/plain, Size: 15936 bytes --]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/init_task.h | 2 -
include/linux/sched.h | 2 -
kernel/exit.c | 2 -
kernel/fork.c | 4 +--
kernel/futex.c | 38 +++++++++++++++---------------
kernel/rtmutex-debug.c | 4 +--
kernel/rtmutex.c | 58 +++++++++++++++++++++++-----------------------
kernel/sched.c | 12 ++++-----
8 files changed, 61 insertions(+), 61 deletions(-)
Index: linux-2.6-tip/include/linux/init_task.h
===================================================================
--- linux-2.6-tip.orig/include/linux/init_task.h
+++ linux-2.6-tip/include/linux/init_task.h
@@ -165,7 +165,7 @@ extern struct cred init_cred;
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
- .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
Index: linux-2.6-tip/include/linux/sched.h
===================================================================
--- linux-2.6-tip.orig/include/linux/sched.h
+++ linux-2.6-tip/include/linux/sched.h
@@ -1411,7 +1411,7 @@ struct task_struct {
#endif
/* Protection of the PI data structures: */
- spinlock_t pi_lock;
+ raw_spinlock_t pi_lock;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
Index: linux-2.6-tip/kernel/exit.c
===================================================================
--- linux-2.6-tip.orig/kernel/exit.c
+++ linux-2.6-tip/kernel/exit.c
@@ -933,7 +933,7 @@ NORET_TYPE void do_exit(long code)
* an exiting task cleaning up the robust pi futexes.
*/
smp_mb();
- spin_unlock_wait(&tsk->pi_lock);
+ raw_spin_unlock_wait(&tsk->pi_lock);
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
Index: linux-2.6-tip/kernel/fork.c
===================================================================
--- linux-2.6-tip.orig/kernel/fork.c
+++ linux-2.6-tip/kernel/fork.c
@@ -937,9 +937,9 @@ SYSCALL_DEFINE1(set_tid_address, int __u
static void rt_mutex_init_task(struct task_struct *p)
{
- spin_lock_init(&p->pi_lock);
+ raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
- plist_head_init(&p->pi_waiters, &p->pi_lock);
+ plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
p->pi_blocked_on = NULL;
#endif
}
Index: linux-2.6-tip/kernel/futex.c
===================================================================
--- linux-2.6-tip.orig/kernel/futex.c
+++ linux-2.6-tip/kernel/futex.c
@@ -397,9 +397,9 @@ static void free_pi_state(struct futex_p
* and has cleaned up the pi_state already
*/
if (pi_state->owner) {
- spin_lock_irq(&pi_state->owner->pi_lock);
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
list_del_init(&pi_state->list);
- spin_unlock_irq(&pi_state->owner->pi_lock);
+ raw_spin_unlock_irq(&pi_state->owner->pi_lock);
rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
}
@@ -464,18 +464,18 @@ void exit_pi_state_list(struct task_stru
* pi_state_list anymore, but we have to be careful
* versus waiters unqueueing themselves:
*/
- spin_lock_irq(&curr->pi_lock);
+ raw_spin_lock_irq(&curr->pi_lock);
while (!list_empty(head)) {
next = head->next;
pi_state = list_entry(next, struct futex_pi_state, list);
key = pi_state->key;
hb = hash_futex(&key);
- spin_unlock_irq(&curr->pi_lock);
+ raw_spin_unlock_irq(&curr->pi_lock);
spin_lock(&hb->lock);
- spin_lock_irq(&curr->pi_lock);
+ raw_spin_lock_irq(&curr->pi_lock);
/*
* We dropped the pi-lock, so re-check whether this
* task still owns the PI-state:
@@ -489,15 +489,15 @@ void exit_pi_state_list(struct task_stru
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
pi_state->owner = NULL;
- spin_unlock_irq(&curr->pi_lock);
+ raw_spin_unlock_irq(&curr->pi_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
spin_unlock(&hb->lock);
- spin_lock_irq(&curr->pi_lock);
+ raw_spin_lock_irq(&curr->pi_lock);
}
- spin_unlock_irq(&curr->pi_lock);
+ raw_spin_unlock_irq(&curr->pi_lock);
}
static int
@@ -552,7 +552,7 @@ lookup_pi_state(u32 uval, struct futex_h
* change of the task flags, we do this protected by
* p->pi_lock:
*/
- spin_lock_irq(&p->pi_lock);
+ raw_spin_lock_irq(&p->pi_lock);
if (unlikely(p->flags & PF_EXITING)) {
/*
* The task is on the way out. When PF_EXITPIDONE is
@@ -561,7 +561,7 @@ lookup_pi_state(u32 uval, struct futex_h
*/
int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
- spin_unlock_irq(&p->pi_lock);
+ raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
return ret;
}
@@ -580,7 +580,7 @@ lookup_pi_state(u32 uval, struct futex_h
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &p->pi_state_list);
pi_state->owner = p;
- spin_unlock_irq(&p->pi_lock);
+ raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
@@ -788,16 +788,16 @@ static int wake_futex_pi(u32 __user *uad
}
}
- spin_lock_irq(&pi_state->owner->pi_lock);
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
- spin_unlock_irq(&pi_state->owner->pi_lock);
+ raw_spin_unlock_irq(&pi_state->owner->pi_lock);
- spin_lock_irq(&new_owner->pi_lock);
+ raw_spin_lock_irq(&new_owner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &new_owner->pi_state_list);
pi_state->owner = new_owner;
- spin_unlock_irq(&new_owner->pi_lock);
+ raw_spin_unlock_irq(&new_owner->pi_lock);
spin_unlock(&pi_state->pi_mutex.wait_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
@@ -1523,18 +1523,18 @@ retry:
* itself.
*/
if (pi_state->owner != NULL) {
- spin_lock_irq(&pi_state->owner->pi_lock);
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
- spin_unlock_irq(&pi_state->owner->pi_lock);
+ raw_spin_unlock_irq(&pi_state->owner->pi_lock);
}
pi_state->owner = newowner;
- spin_lock_irq(&newowner->pi_lock);
+ raw_spin_lock_irq(&newowner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &newowner->pi_state_list);
- spin_unlock_irq(&newowner->pi_lock);
+ raw_spin_unlock_irq(&newowner->pi_lock);
return 0;
/*
Index: linux-2.6-tip/kernel/rtmutex-debug.c
===================================================================
--- linux-2.6-tip.orig/kernel/rtmutex-debug.c
+++ linux-2.6-tip/kernel/rtmutex-debug.c
@@ -37,8 +37,8 @@ do { \
if (rt_trace_on) { \
rt_trace_on = 0; \
console_verbose(); \
- if (spin_is_locked(&current->pi_lock)) \
- spin_unlock(&current->pi_lock); \
+ if (raw_spin_is_locked(&current->pi_lock)) \
+ raw_spin_unlock(&current->pi_lock); \
} \
} while (0)
Index: linux-2.6-tip/kernel/rtmutex.c
===================================================================
--- linux-2.6-tip.orig/kernel/rtmutex.c
+++ linux-2.6-tip/kernel/rtmutex.c
@@ -138,9 +138,9 @@ static void rt_mutex_adjust_prio(struct
{
unsigned long flags;
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
/*
@@ -195,7 +195,7 @@ static int rt_mutex_adjust_prio_chain(st
/*
* Task can not go away as we did a get_task() before !
*/
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
/*
@@ -232,7 +232,7 @@ static int rt_mutex_adjust_prio_chain(st
lock = waiter->lock;
if (!spin_trylock(&lock->wait_lock)) {
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
cpu_relax();
goto retry;
}
@@ -253,13 +253,13 @@ static int rt_mutex_adjust_prio_chain(st
plist_add(&waiter->list_entry, &lock->wait_list);
/* Release the task */
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
put_task_struct(task);
/* Grab the next task */
task = rt_mutex_owner(lock);
get_task_struct(task);
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
/* Boost the owner */
@@ -277,7 +277,7 @@ static int rt_mutex_adjust_prio_chain(st
__rt_mutex_adjust_prio(task);
}
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
top_waiter = rt_mutex_top_waiter(lock);
spin_unlock(&lock->wait_lock);
@@ -288,7 +288,7 @@ static int rt_mutex_adjust_prio_chain(st
goto again;
out_unlock_pi:
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
out_put_task:
put_task_struct(task);
@@ -313,9 +313,9 @@ static inline int try_to_steal_lock(stru
if (pendowner == task)
return 1;
- spin_lock_irqsave(&pendowner->pi_lock, flags);
+ raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
if (task->prio >= pendowner->prio) {
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
return 0;
}
@@ -325,7 +325,7 @@ static inline int try_to_steal_lock(stru
* priority.
*/
if (likely(!rt_mutex_has_waiters(lock))) {
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
return 1;
}
@@ -333,7 +333,7 @@ static inline int try_to_steal_lock(stru
next = rt_mutex_top_waiter(lock);
plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
__rt_mutex_adjust_prio(pendowner);
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
/*
* We are going to steal the lock and a waiter was
@@ -350,10 +350,10 @@ static inline int try_to_steal_lock(stru
* might be task:
*/
if (likely(next->task != task)) {
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
plist_add(&next->pi_list_entry, &task->pi_waiters);
__rt_mutex_adjust_prio(task);
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
return 1;
}
@@ -420,7 +420,7 @@ static int task_blocks_on_rt_mutex(struc
unsigned long flags;
int chain_walk = 0, res;
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
@@ -434,17 +434,17 @@ static int task_blocks_on_rt_mutex(struc
task->pi_blocked_on = waiter;
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
- spin_lock_irqsave(&owner->pi_lock, flags);
+ raw_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
__rt_mutex_adjust_prio(owner);
if (owner->pi_blocked_on)
chain_walk = 1;
- spin_unlock_irqrestore(&owner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
chain_walk = 1;
@@ -483,7 +483,7 @@ static void wakeup_next_waiter(struct rt
struct task_struct *pendowner;
unsigned long flags;
- spin_lock_irqsave(&current->pi_lock, flags);
+ raw_spin_lock_irqsave(&current->pi_lock, flags);
waiter = rt_mutex_top_waiter(lock);
plist_del(&waiter->list_entry, &lock->wait_list);
@@ -500,7 +500,7 @@ static void wakeup_next_waiter(struct rt
rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
- spin_unlock_irqrestore(&current->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
/*
* Clear the pi_blocked_on variable and enqueue a possible
@@ -509,7 +509,7 @@ static void wakeup_next_waiter(struct rt
* waiter with higher priority than pending-owner->normal_prio
* is blocked on the unboosted (pending) owner.
*/
- spin_lock_irqsave(&pendowner->pi_lock, flags);
+ raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
WARN_ON(!pendowner->pi_blocked_on);
WARN_ON(pendowner->pi_blocked_on != waiter);
@@ -523,7 +523,7 @@ static void wakeup_next_waiter(struct rt
next = rt_mutex_top_waiter(lock);
plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
}
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
wake_up_process(pendowner);
}
@@ -541,15 +541,15 @@ static void remove_waiter(struct rt_mute
unsigned long flags;
int chain_walk = 0;
- spin_lock_irqsave(&current->pi_lock, flags);
+ raw_spin_lock_irqsave(&current->pi_lock, flags);
plist_del(&waiter->list_entry, &lock->wait_list);
waiter->task = NULL;
current->pi_blocked_on = NULL;
- spin_unlock_irqrestore(&current->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
if (first && owner != current) {
- spin_lock_irqsave(&owner->pi_lock, flags);
+ raw_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
@@ -564,7 +564,7 @@ static void remove_waiter(struct rt_mute
if (owner->pi_blocked_on)
chain_walk = 1;
- spin_unlock_irqrestore(&owner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
@@ -592,15 +592,15 @@ void rt_mutex_adjust_pi(struct task_stru
struct rt_mutex_waiter *waiter;
unsigned long flags;
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
if (!waiter || waiter->list_entry.prio == task->prio) {
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);
Index: linux-2.6-tip/kernel/sched.c
===================================================================
--- linux-2.6-tip.orig/kernel/sched.c
+++ linux-2.6-tip/kernel/sched.c
@@ -6346,7 +6346,7 @@ recheck:
* make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
*/
- spin_lock_irqsave(&p->pi_lock, flags);
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
/*
* To be able to change p->policy safely, the apropriate
* runqueue lock must be held.
@@ -6356,7 +6356,7 @@ recheck:
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
__task_rq_unlock(rq);
- spin_unlock_irqrestore(&p->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
goto recheck;
}
update_rq_clock(rq);
@@ -6380,7 +6380,7 @@ recheck:
check_class_changed(rq, p, prev_class, oldprio, running);
}
__task_rq_unlock(rq);
- spin_unlock_irqrestore(&p->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
rt_mutex_adjust_pi(p);
@@ -9619,7 +9619,7 @@ void __init sched_init(void)
#endif
#ifdef CONFIG_RT_MUTEXES
- plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+ plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
#endif
/*
@@ -9744,13 +9744,13 @@ void normalize_rt_tasks(void)
continue;
}
- spin_lock(&p->pi_lock);
+ raw_spin_lock(&p->pi_lock);
rq = __task_rq_lock(p);
normalize_task(rq, p);
__task_rq_unlock(rq);
- spin_unlock(&p->pi_lock);
+ raw_spin_unlock(&p->pi_lock);
} while_each_thread(g, p);
read_unlock_irqrestore(&tasklist_lock, flags);
^ permalink raw reply [flat|nested] 42+ messages in thread
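Besides switching the lock type, patch 17 has to move the pi_waiters initializers over to plist_head_init_raw(): with plist debugging enabled the plist head records the lock that protects it and checks it in the list operations, so once pi_lock is a raw_spinlock_t the recorded lock has to be raw as well. A sketch of that pairing (the struct and function names are invented; plist_head_init_raw is the helper the patch uses):

#include <linux/plist.h>
#include <linux/spinlock.h>

struct example_pi {
	raw_spinlock_t lock;
	struct plist_head waiters;
};

static void example_pi_init(struct example_pi *pi)
{
	raw_spin_lock_init(&pi->lock);
	/* the plist debug checks now expect a raw lock, hence the _raw initializer */
	plist_head_init_raw(&pi->waiters, &pi->lock);
}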
* [patch 18/23] rtmutex: Convert rtmutex.lock to raw_spinlock
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (16 preceding siblings ...)
2009-12-06 18:03 ` [patch 17/23] sched: Convert pi_lock " Thomas Gleixner
@ 2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 19/23] smp: Convert smplocks to raw_spinlocks Thomas Gleixner
` (4 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:03 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: rtmutex-convert-wait-lock-to-core-lock.patch --]
[-- Type: text/plain, Size: 7361 bytes --]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/rtmutex.h | 6 +++---
kernel/futex.c | 6 +++---
kernel/rtmutex.c | 48 ++++++++++++++++++++++++------------------------
3 files changed, 30 insertions(+), 30 deletions(-)
Index: linux-2.6-tip/include/linux/rtmutex.h
===================================================================
--- linux-2.6-tip.orig/include/linux/rtmutex.h
+++ linux-2.6-tip/include/linux/rtmutex.h
@@ -24,7 +24,7 @@
* @owner: the mutex owner
*/
struct rt_mutex {
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct plist_head wait_list;
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -63,8 +63,8 @@ struct hrtimer_sleeper;
#endif
#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
Index: linux-2.6-tip/kernel/futex.c
===================================================================
--- linux-2.6-tip.orig/kernel/futex.c
+++ linux-2.6-tip/kernel/futex.c
@@ -754,7 +754,7 @@ static int wake_futex_pi(u32 __user *uad
if (!pi_state)
return -EINVAL;
- spin_lock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_lock(&pi_state->pi_mutex.wait_lock);
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
/*
@@ -783,7 +783,7 @@ static int wake_futex_pi(u32 __user *uad
else if (curval != uval)
ret = -EINVAL;
if (ret) {
- spin_unlock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
return ret;
}
}
@@ -799,7 +799,7 @@ static int wake_futex_pi(u32 __user *uad
pi_state->owner = new_owner;
raw_spin_unlock_irq(&new_owner->pi_lock);
- spin_unlock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
return 0;
Index: linux-2.6-tip/kernel/rtmutex.c
===================================================================
--- linux-2.6-tip.orig/kernel/rtmutex.c
+++ linux-2.6-tip/kernel/rtmutex.c
@@ -231,7 +231,7 @@ static int rt_mutex_adjust_prio_chain(st
goto out_unlock_pi;
lock = waiter->lock;
- if (!spin_trylock(&lock->wait_lock)) {
+ if (!raw_spin_trylock(&lock->wait_lock)) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
cpu_relax();
goto retry;
@@ -240,7 +240,7 @@ static int rt_mutex_adjust_prio_chain(st
/* Deadlock detection */
if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
ret = deadlock_detect ? -EDEADLK : 0;
goto out_unlock_pi;
}
@@ -280,7 +280,7 @@ static int rt_mutex_adjust_prio_chain(st
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
top_waiter = rt_mutex_top_waiter(lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
if (!detect_deadlock && waiter != top_waiter)
goto out_put_task;
@@ -459,12 +459,12 @@ static int task_blocks_on_rt_mutex(struc
*/
get_task_struct(owner);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
task);
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
return res;
}
@@ -575,11 +575,11 @@ static void remove_waiter(struct rt_mute
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(owner);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
}
/*
@@ -672,14 +672,14 @@ __rt_mutex_slowlock(struct rt_mutex *loc
break;
}
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
if (waiter->task)
schedule_rt_mutex(lock);
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
set_current_state(state);
}
@@ -700,11 +700,11 @@ rt_mutex_slowlock(struct rt_mutex *lock,
debug_rt_mutex_init_waiter(&waiter);
waiter.task = NULL;
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock)) {
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
return 0;
}
@@ -731,7 +731,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
*/
fixup_rt_mutex_waiters(lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
/* Remove pending timer: */
if (unlikely(timeout))
@@ -758,7 +758,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo
{
int ret = 0;
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
if (likely(rt_mutex_owner(lock) != current)) {
@@ -770,7 +770,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo
fixup_rt_mutex_waiters(lock);
}
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
return ret;
}
@@ -781,7 +781,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
debug_rt_mutex_unlock(lock);
@@ -789,13 +789,13 @@ rt_mutex_slowunlock(struct rt_mutex *loc
if (!rt_mutex_has_waiters(lock)) {
lock->owner = NULL;
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
return;
}
wakeup_next_waiter(lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
/* Undo pi boosting if necessary: */
rt_mutex_adjust_prio(current);
@@ -970,8 +970,8 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
- spin_lock_init(&lock->wait_lock);
- plist_head_init(&lock->wait_list, &lock->wait_lock);
+ raw_spin_lock_init(&lock->wait_lock);
+ plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
debug_rt_mutex_init(lock, name);
}
@@ -1032,7 +1032,7 @@ int rt_mutex_start_proxy_lock(struct rt_
{
int ret;
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
mark_rt_mutex_waiters(lock);
@@ -1040,7 +1040,7 @@ int rt_mutex_start_proxy_lock(struct rt_
/* We got the lock for task. */
debug_rt_mutex_lock(lock);
rt_mutex_set_owner(lock, task, 0);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
rt_mutex_deadlock_account_lock(lock, task);
return 1;
}
@@ -1056,7 +1056,7 @@ int rt_mutex_start_proxy_lock(struct rt_
*/
ret = 0;
}
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
@@ -1106,7 +1106,7 @@ int rt_mutex_finish_proxy_lock(struct rt
{
int ret;
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
set_current_state(TASK_INTERRUPTIBLE);
@@ -1124,7 +1124,7 @@ int rt_mutex_finish_proxy_lock(struct rt
*/
fixup_rt_mutex_waiters(lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
/*
* Readjust priority, when we did not get the lock. We might have been
^ permalink raw reply [flat|nested] 42+ messages in thread
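One thing the rtmutex hunks make visible is that lock->wait_lock is always dropped before anything that can block (schedule_rt_mutex()) or before re-walking the PI chain, and re-taken afterwards, so it is only ever held for short, non-sleeping sections, which is exactly what a raw_spinlock_t requires. A heavily condensed sketch of that structure (not the literal kernel code):

static int example_slowlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	while (!try_to_take_rt_mutex(lock)) {
		/* never block with the raw wait_lock held */
		raw_spin_unlock(&lock->wait_lock);
		schedule_rt_mutex(lock);
		raw_spin_lock(&lock->wait_lock);
	}

	raw_spin_unlock(&lock->wait_lock);
	return 0;
}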
* [patch 19/23] smp: Convert smplocks to raw_spinlocks
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (17 preceding siblings ...)
2009-12-06 18:03 ` [patch 18/23] rtmutex: Convert rtmutex.lock " Thomas Gleixner
@ 2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 20/23] genirq: Convert irq_desc.lock to raw_spinlock Thomas Gleixner
` (3 subsequent siblings)
22 siblings, 0 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:03 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: smp-convert-smplocks-to-core-locks.patch --]
[-- Type: text/plain, Size: 3480 bytes --]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/smp.c | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
Index: linux-2.6-tip/kernel/smp.c
===================================================================
--- linux-2.6-tip.orig/kernel/smp.c
+++ linux-2.6-tip/kernel/smp.c
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single
static struct {
struct list_head queue;
- spinlock_t lock;
+ raw_spinlock_t lock;
} call_function __cacheline_aligned_in_smp =
{
.queue = LIST_HEAD_INIT(call_function.queue),
- .lock = __SPIN_LOCK_UNLOCKED(call_function.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
};
enum {
@@ -35,7 +35,7 @@ struct call_function_data {
struct call_single_queue {
struct list_head list;
- spinlock_t lock;
+ raw_spinlock_t lock;
};
static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_da
for_each_possible_cpu(i) {
struct call_single_queue *q = &per_cpu(call_single_queue, i);
- spin_lock_init(&q->lock);
+ raw_spin_lock_init(&q->lock);
INIT_LIST_HEAD(&q->list);
}
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct
unsigned long flags;
int ipi;
- spin_lock_irqsave(&dst->lock, flags);
+ raw_spin_lock_irqsave(&dst->lock, flags);
ipi = list_empty(&dst->list);
list_add_tail(&data->list, &dst->list);
- spin_unlock_irqrestore(&dst->lock, flags);
+ raw_spin_unlock_irqrestore(&dst->lock, flags);
/*
* The list addition should be visible before sending the IPI
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt
refs = atomic_dec_return(&data->refs);
WARN_ON(refs < 0);
if (!refs) {
- spin_lock(&call_function.lock);
+ raw_spin_lock(&call_function.lock);
list_del_rcu(&data->csd.list);
- spin_unlock(&call_function.lock);
+ raw_spin_unlock(&call_function.lock);
}
if (refs)
@@ -230,9 +230,9 @@ void generic_smp_call_function_single_in
*/
WARN_ON_ONCE(!cpu_online(smp_processor_id()));
- spin_lock(&q->lock);
+ raw_spin_lock(&q->lock);
list_replace_init(&q->list, &list);
- spin_unlock(&q->lock);
+ raw_spin_unlock(&q->lock);
while (!list_empty(&list)) {
struct call_single_data *data;
@@ -449,14 +449,14 @@ void smp_call_function_many(const struct
cpumask_clear_cpu(this_cpu, data->cpumask);
atomic_set(&data->refs, cpumask_weight(data->cpumask));
- spin_lock_irqsave(&call_function.lock, flags);
+ raw_spin_lock_irqsave(&call_function.lock, flags);
/*
* Place entry at the _HEAD_ of the list, so that any cpu still
* observing the entry in generic_smp_call_function_interrupt()
* will not miss any other list entries:
*/
list_add_rcu(&data->csd.list, &call_function.queue);
- spin_unlock_irqrestore(&call_function.lock, flags);
+ raw_spin_unlock_irqrestore(&call_function.lock, flags);
/*
* Make the list addition visible before sending the ipi.
@@ -501,20 +501,20 @@ EXPORT_SYMBOL(smp_call_function);
void ipi_call_lock(void)
{
- spin_lock(&call_function.lock);
+ raw_spin_lock(&call_function.lock);
}
void ipi_call_unlock(void)
{
- spin_unlock(&call_function.lock);
+ raw_spin_unlock(&call_function.lock);
}
void ipi_call_lock_irq(void)
{
- spin_lock_irq(&call_function.lock);
+ raw_spin_lock_irq(&call_function.lock);
}
void ipi_call_unlock_irq(void)
{
- spin_unlock_irq(&call_function.lock);
+ raw_spin_unlock_irq(&call_function.lock);
}
^ permalink raw reply [flat|nested] 42+ messages in thread
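Patch 19 covers the remaining initializer form: a lock inside a statically defined object moves from __SPIN_LOCK_UNLOCKED to __RAW_SPIN_LOCK_UNLOCKED, again with the surrounding code left alone. A minimal sketch (the structure and its name are invented for illustration):

#include <linux/list.h>
#include <linux/spinlock.h>

static struct {
	struct list_head queue;
	raw_spinlock_t lock;
} example_queue = {
	.queue = LIST_HEAD_INIT(example_queue.queue),
	.lock  = __RAW_SPIN_LOCK_UNLOCKED(example_queue.lock),
};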
* [patch 20/23] genirq: Convert irq_desc.lock to raw_spinlock
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (18 preceding siblings ...)
2009-12-06 18:03 ` [patch 19/23] smp: Convert smplocks to raw_spinlocks Thomas Gleixner
@ 2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 21/23] hrtimers: Convert to raw_spinlocks Thomas Gleixner
` (2 subsequent siblings)
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:03 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: irq-convert-to-core-locks.patch --]
[-- Type: text/plain, Size: 65582 bytes --]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/alpha/kernel/irq.c | 4 -
arch/arm/include/asm/mach/irq.h | 4 -
arch/arm/kernel/irq.c | 12 +--
arch/arm/mach-ns9xxx/irq.c | 8 +-
arch/avr32/kernel/irq.c | 4 -
arch/blackfin/kernel/irqchip.c | 6 -
arch/blackfin/kernel/traps.c | 4 -
arch/cris/kernel/irq.c | 4 -
arch/frv/kernel/irq.c | 4 -
arch/h8300/kernel/irq.c | 4 -
arch/ia64/kernel/iosapic.c | 6 -
arch/ia64/kernel/irq.c | 4 -
arch/ia64/kernel/irq_ia64.c | 4 -
arch/m32r/kernel/irq.c | 4 -
arch/microblaze/kernel/irq.c | 4 -
arch/mips/kernel/irq.c | 4 -
arch/mips/vr41xx/common/icu.c | 92 ++++++++++++++--------------
arch/mn10300/kernel/irq.c | 4 -
arch/parisc/kernel/irq.c | 4 -
arch/powerpc/kernel/irq.c | 8 +-
arch/powerpc/platforms/52xx/media5200.c | 8 +-
arch/powerpc/platforms/cell/interrupt.c | 8 +-
arch/powerpc/platforms/iseries/irq.c | 4 -
arch/powerpc/platforms/pseries/eeh_driver.c | 4 -
arch/powerpc/platforms/pseries/xics.c | 4 -
arch/powerpc/sysdev/fsl_msi.c | 4 -
arch/powerpc/sysdev/uic.c | 8 +-
arch/sh/kernel/irq.c | 4 -
arch/sparc/kernel/irq_64.c | 8 +-
arch/um/kernel/irq.c | 4 -
arch/x86/kernel/apic/io_apic.c | 4 -
arch/x86/kernel/irq.c | 14 ++--
arch/xtensa/kernel/irq.c | 4 -
include/linux/irq.h | 2
kernel/irq/autoprobe.c | 20 +++---
kernel/irq/chip.c | 86 +++++++++++++-------------
kernel/irq/handle.c | 22 +++---
kernel/irq/internals.h | 2
kernel/irq/manage.c | 50 +++++++--------
kernel/irq/migration.c | 2
kernel/irq/numa_migrate.c | 8 +-
kernel/irq/pm.c | 8 +-
kernel/irq/proc.c | 4 -
kernel/irq/spurious.c | 14 ++--
44 files changed, 242 insertions(+), 242 deletions(-)
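This is the largest conversion in the series because irq_desc[].lock is taken directly by arch code, mostly the per-architecture show_interrupts() implementations, in addition to the genirq core. The per-call-site change stays purely mechanical; for reference, the common shape is roughly (an illustrative sketch, not code from the patch):

#include <linux/irq.h>
#include <linux/seq_file.h>

static void example_show_one_irq(struct irq_desc *desc, struct seq_file *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->action)
		seq_printf(p, "%s\n", desc->action->name);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}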
Index: linux-2.6-tip/arch/alpha/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/alpha/kernel/irq.c
+++ linux-2.6-tip/arch/alpha/kernel/irq.c
@@ -81,7 +81,7 @@ show_interrupts(struct seq_file *p, void
#endif
if (irq < ACTUAL_NR_IRQS) {
- spin_lock_irqsave(&irq_desc[irq].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
action = irq_desc[irq].action;
if (!action)
goto unlock;
@@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
} else if (irq == ACTUAL_NR_IRQS) {
#ifdef CONFIG_SMP
seq_puts(p, "IPI: ");
Index: linux-2.6-tip/arch/arm/include/asm/mach/irq.h
===================================================================
--- linux-2.6-tip.orig/arch/arm/include/asm/mach/irq.h
+++ linux-2.6-tip/arch/arm/include/asm/mach/irq.h
@@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file
*/
#define do_bad_IRQ(irq,desc) \
do { \
- spin_lock(&desc->lock); \
+ raw_spin_lock(&desc->lock); \
handle_bad_irq(irq, desc); \
- spin_unlock(&desc->lock); \
+ raw_spin_unlock(&desc->lock); \
} while(0)
#endif
Index: linux-2.6-tip/arch/arm/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/arm/kernel/irq.c
+++ linux-2.6-tip/arch/arm/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
#ifdef CONFIG_FIQ
show_fiq_list(p, v);
@@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, uns
}
desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
if (iflags & IRQF_VALID)
desc->status &= ~IRQ_NOREQUEST;
@@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, uns
desc->status &= ~IRQ_NOPROBE;
if (!(iflags & IRQF_NOAUTOEN))
desc->status &= ~IRQ_NOAUTOEN;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
void __init init_IRQ(void)
@@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *d
{
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
desc->chip->set_affinity(irq, cpumask_of(cpu));
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/*
Index: linux-2.6-tip/arch/arm/mach-ns9xxx/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/arm/mach-ns9xxx/irq.c
+++ linux-2.6-tip/arch/arm/mach-ns9xxx/irq.c
@@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
BUG_ON(desc->status & IRQ_INPROGRESS);
@@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int
goto out_mask;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
@@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int
* Maybe this function should go to kernel/irq/chip.c? */
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
if (desc->status & IRQ_DISABLED)
@@ -97,7 +97,7 @@ out_mask:
/* ack unconditionally to unmask lower prio irqs */
desc->chip->ack(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
#define handle_irq handle_prio_irq
#endif
Index: linux-2.6-tip/arch/avr32/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/avr32/kernel/irq.c
+++ linux-2.6-tip/arch/avr32/kernel/irq.c
@@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -66,7 +66,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
Index: linux-2.6-tip/arch/blackfin/kernel/irqchip.c
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/kernel/irqchip.c
+++ linux-2.6-tip/arch/blackfin/kernel/irqchip.c
@@ -23,7 +23,7 @@ void ack_bad_irq(unsigned int irq)
static struct irq_desc bad_irq_desc = {
.handle_irq = handle_bad_irq,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
};
#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p,
unsigned long flags;
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
Index: linux-2.6-tip/arch/blackfin/kernel/traps.c
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/kernel/traps.c
+++ linux-2.6-tip/arch/blackfin/kernel/traps.c
@@ -1140,7 +1140,7 @@ void show_regs(struct pt_regs *fp)
if (fp->ipend & ~0x3F) {
for (i = 0; i < (NR_IRQS - 1); i++) {
if (!in_atomic)
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
@@ -1155,7 +1155,7 @@ void show_regs(struct pt_regs *fp)
verbose_printk("\n");
unlock:
if (!in_atomic)
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
}
Index: linux-2.6-tip/arch/cris/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/cris/kernel/irq.c
+++ linux-2.6-tip/arch/cris/kernel/irq.c
@@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
Index: linux-2.6-tip/arch/frv/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/frv/kernel/irq.c
+++ linux-2.6-tip/arch/frv/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (action) {
seq_printf(p, "%3d: ", i);
@@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
}
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
}
Index: linux-2.6-tip/arch/h8300/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/h8300/kernel/irq.c
+++ linux-2.6-tip/arch/h8300/kernel/irq.c
@@ -186,7 +186,7 @@ int show_interrupts(struct seq_file *p,
seq_puts(p, " CPU0");
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -200,7 +200,7 @@ int show_interrupts(struct seq_file *p,
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
Index: linux-2.6-tip/arch/ia64/kernel/iosapic.c
===================================================================
--- linux-2.6-tip.orig/arch/ia64/kernel/iosapic.c
+++ linux-2.6-tip/arch/ia64/kernel/iosapic.c
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
goto unlock_iosapic_lock;
}
- spin_lock(&irq_desc[irq].lock);
+ raw_spin_lock(&irq_desc[irq].lock);
dest = get_target_cpu(gsi, irq);
dmode = choose_dmode();
err = register_intr(gsi, irq, dmode, polarity, trigger);
if (err < 0) {
- spin_unlock(&irq_desc[irq].lock);
+ raw_spin_unlock(&irq_desc[irq].lock);
irq = err;
goto unlock_iosapic_lock;
}
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, irq_to_vector(irq));
- spin_unlock(&irq_desc[irq].lock);
+ raw_spin_unlock(&irq_desc[irq].lock);
unlock_iosapic_lock:
spin_unlock_irqrestore(&iosapic_lock, flags);
return irq;
Index: linux-2.6-tip/arch/ia64/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/ia64/kernel/irq.c
+++ linux-2.6-tip/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS)
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
return 0;
Index: linux-2.6-tip/arch/ia64/kernel/irq_ia64.c
===================================================================
--- linux-2.6-tip.orig/arch/ia64/kernel/irq_ia64.c
+++ linux-2.6-tip/arch/ia64/kernel/irq_ia64.c
@@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_
desc = irq_desc + irq;
cfg = irq_cfg + irq;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (!cfg->move_cleanup_count)
goto unlock;
@@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_
spin_unlock_irqrestore(&vector_lock, flags);
cfg->move_cleanup_count--;
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
return IRQ_HANDLED;
}
Index: linux-2.6-tip/arch/m32r/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/m32r/kernel/irq.c
+++ linux-2.6-tip/arch/m32r/kernel/irq.c
@@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
Index: linux-2.6-tip/arch/microblaze/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/microblaze/kernel/irq.c
+++ linux-2.6-tip/arch/microblaze/kernel/irq.c
@@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < nr_irq) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
Index: linux-2.6-tip/arch/mips/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/mips/kernel/irq.c
+++ linux-2.6-tip/arch/mips/kernel/irq.c
@@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_putc(p, '\n');
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
Index: linux-2.6-tip/arch/mips/vr41xx/common/icu.c
===================================================================
--- linux-2.6-tip.orig/arch/mips/vr41xx/common/icu.c
+++ linux-2.6-tip/arch/mips/vr41xx/common/icu.c
@@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MPIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MPIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MAIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MAIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MKIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MKIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask)
struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MMACINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_macint);
@@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask
struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MMACINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_macint);
@@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask
struct irq_desc *desc = irq_desc + DSIU_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MDSIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_dsiuint);
@@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mas
struct irq_desc *desc = irq_desc + DSIU_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MDSIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_dsiuint);
@@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask)
struct irq_desc *desc = irq_desc + FIR_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_set(MFIRINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_firint);
@@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask
struct irq_desc *desc = irq_desc + FIR_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_clear(MFIRINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_firint);
@@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MPCIINTREG, PCIINT0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MPCIINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MSCUINTREG, SCUINT0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MSCUINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_set(MCSIINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_clear(MCSIINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MBCUINTREG, BCUINTR);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MBCUINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -486,7 +486,7 @@ static inline int set_sysint1_assign(uns
pin = SYSINT1_IRQ_TO_PIN(irq);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
intassign0 = icu1_read(INTASSIGN0);
intassign1 = icu1_read(INTASSIGN1);
@@ -525,7 +525,7 @@ static inline int set_sysint1_assign(uns
intassign1 |= (uint16_t)assign << 9;
break;
default:
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return -EINVAL;
}
@@ -533,7 +533,7 @@ static inline int set_sysint1_assign(uns
icu1_write(INTASSIGN0, intassign0);
icu1_write(INTASSIGN1, intassign1);
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return 0;
}
@@ -546,7 +546,7 @@ static inline int set_sysint2_assign(uns
pin = SYSINT2_IRQ_TO_PIN(irq);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
intassign2 = icu1_read(INTASSIGN2);
intassign3 = icu1_read(INTASSIGN3);
@@ -593,7 +593,7 @@ static inline int set_sysint2_assign(uns
intassign3 |= (uint16_t)assign << 12;
break;
default:
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return -EINVAL;
}
@@ -601,7 +601,7 @@ static inline int set_sysint2_assign(uns
icu1_write(INTASSIGN2, intassign2);
icu1_write(INTASSIGN3, intassign3);
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return 0;
}
Index: linux-2.6-tip/arch/mn10300/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/mn10300/kernel/irq.c
+++ linux-2.6-tip/arch/mn10300/kernel/irq.c
@@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p,
/* display information rows, one per active CPU */
case 1 ... NR_IRQS - 1:
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (action) {
@@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
}
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
break;
/* polish off with NMI and error counters */
Index: linux-2.6-tip/arch/parisc/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/parisc/kernel/irq.c
+++ linux-2.6-tip/arch/parisc/kernel/irq.c
@@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p,
if (i < NR_IRQS) {
struct irqaction *action;
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
Index: linux-2.6-tip/arch/powerpc/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/kernel/irq.c
+++ linux-2.6-tip/arch/powerpc/kernel/irq.c
@@ -193,7 +193,7 @@ int show_interrupts(struct seq_file *p,
if (i < NR_IRQS) {
desc = get_irq_desc(i);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
if (!action || !action->handler)
goto skip;
@@ -214,7 +214,7 @@ int show_interrupts(struct seq_file *p,
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
} else if (i == NR_IRQS) {
#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
if (tau_initialized){
@@ -1071,7 +1071,7 @@ static int virq_debug_show(struct seq_fi
for (i = 1; i < NR_IRQS; i++) {
desc = get_irq_desc(i);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action && desc->action->handler) {
seq_printf(m, "%5d ", i);
@@ -1090,7 +1090,7 @@ static int virq_debug_show(struct seq_fi
seq_printf(m, "%s\n", p);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
return 0;
Index: linux-2.6-tip/arch/powerpc/platforms/52xx/media5200.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/platforms/52xx/media5200.c
+++ linux-2.6-tip/arch/powerpc/platforms/52xx/media5200.c
@@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int
u32 status, enable;
/* Mask off the cascaded IRQ */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->chip->mask(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
/* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs
* are pending. 'ffs()' is 1 based */
@@ -104,11 +104,11 @@ void media5200_irq_cascade(unsigned int
}
/* Processing done; can reenable the cascade now */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->chip->ack(virq);
if (!(desc->status & IRQ_DISABLED))
desc->chip->unmask(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static int media5200_irq_map(struct irq_host *h, unsigned int virq,
Index: linux-2.6-tip/arch/powerpc/platforms/cell/interrupt.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/platforms/cell/interrupt.c
+++ linux-2.6-tip/arch/powerpc/platforms/cell/interrupt.c
@@ -237,7 +237,7 @@ extern int noirqdebug;
static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
{
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int
goto out_eoi;
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
desc->status &= ~IRQ_INPROGRESS;
out_eoi:
desc->chip->eoi(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static int iic_host_map(struct irq_host *h, unsigned int virq,
Index: linux-2.6-tip/arch/powerpc/platforms/iseries/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/platforms/iseries/irq.c
+++ linux-2.6-tip/arch/powerpc/platforms/iseries/irq.c
@@ -217,9 +217,9 @@ void __init iSeries_activate_IRQs()
struct irq_desc *desc = get_irq_desc(irq);
if (desc && desc->chip && desc->chip->startup) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->chip->startup(irq);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
}
Index: linux-2.6-tip/arch/powerpc/platforms/pseries/eeh_driver.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/platforms/pseries/eeh_driver.c
+++ linux-2.6-tip/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -72,10 +72,10 @@ static int irq_in_use(unsigned int irq)
unsigned long flags;
struct irq_desc *desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action)
rc = 1;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return rc;
}
Index: linux-2.6-tip/arch/powerpc/platforms/pseries/xics.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/platforms/pseries/xics.c
+++ linux-2.6-tip/arch/powerpc/platforms/pseries/xics.c
@@ -860,7 +860,7 @@ void xics_migrate_irqs_away(void)
|| desc->chip->set_affinity == NULL)
continue;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
if (status) {
@@ -884,7 +884,7 @@ void xics_migrate_irqs_away(void)
cpumask_setall(irq_desc[virq].affinity);
desc->chip->set_affinity(virq, cpu_all_mask);
unlock:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
#endif
Index: linux-2.6-tip/arch/powerpc/sysdev/fsl_msi.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/sysdev/fsl_msi.c
+++ linux-2.6-tip/arch/powerpc/sysdev/fsl_msi.c
@@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int
u32 intr_index;
u32 have_shift = 0;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
if (desc->chip->mask_ack)
desc->chip->mask_ack(irq);
@@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int
break;
}
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static int __devinit fsl_of_msi_probe(struct of_device *dev,
Index: linux-2.6-tip/arch/powerpc/sysdev/uic.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/sysdev/uic.c
+++ linux-2.6-tip/arch/powerpc/sysdev/uic.c
@@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq,
int src;
int subvirq;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->status & IRQ_LEVEL)
desc->chip->mask(virq);
else
desc->chip->mask_ack(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
msr = mfdcr(uic->dcrbase + UIC_MSR);
if (!msr) /* spurious interrupt */
@@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq,
generic_handle_irq(subvirq);
uic_irq_ret:
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->status & IRQ_LEVEL)
desc->chip->ack(virq);
if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static struct uic * __init uic_init_one(struct device_node *node)
Index: linux-2.6-tip/arch/sh/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/sh/kernel/irq.c
+++ linux-2.6-tip/arch/sh/kernel/irq.c
@@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p,
if (!desc)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
@@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
out:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
#endif
Index: linux-2.6-tip/arch/sparc/kernel/irq_64.c
===================================================================
--- linux-2.6-tip.orig/arch/sparc/kernel/irq_64.c
+++ linux-2.6-tip/arch/sparc/kernel/irq_64.c
@@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
@@ -785,14 +785,14 @@ void fixup_irqs(void)
for (irq = 0; irq < NR_IRQS; irq++) {
unsigned long flags;
- spin_lock_irqsave(&irq_desc[irq].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
if (irq_desc[irq].action &&
!(irq_desc[irq].status & IRQ_PER_CPU)) {
if (irq_desc[irq].chip->set_affinity)
irq_desc[irq].chip->set_affinity(irq,
irq_desc[irq].affinity);
}
- spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
}
tick_ops->disable_irq();
Index: linux-2.6-tip/arch/um/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/um/kernel/irq.c
+++ linux-2.6-tip/arch/um/kernel/irq.c
@@ -34,7 +34,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS)
seq_putc(p, '\n');
Index: linux-2.6-tip/arch/x86/kernel/apic/io_apic.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/kernel/apic/io_apic.c
+++ linux-2.6-tip/arch/x86/kernel/apic/io_apic.c
@@ -2431,7 +2431,7 @@ asmlinkage void smp_irq_move_cleanup_int
continue;
cfg = irq_cfg(irq);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
goto unlock;
@@ -2450,7 +2450,7 @@ asmlinkage void smp_irq_move_cleanup_int
}
__get_cpu_var(vector_irq)[vector] = -1;
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
irq_exit();
Index: linux-2.6-tip/arch/x86/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/kernel/irq.c
+++ linux-2.6-tip/arch/x86/kernel/irq.c
@@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p,
if (!desc)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
@@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
out:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -294,12 +294,12 @@ void fixup_irqs(void)
continue;
/* interrupt's are disabled at this point */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
affinity = desc->affinity;
if (!irq_has_action(irq) ||
cpumask_equal(affinity, cpu_online_mask)) {
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
continue;
}
@@ -326,7 +326,7 @@ void fixup_irqs(void)
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
desc->chip->unmask(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
if (break_affinity && set_affinity)
printk("Broke affinity for irq %i\n", irq);
@@ -356,10 +356,10 @@ void fixup_irqs(void)
irq = __get_cpu_var(vector_irq)[vector];
desc = irq_to_desc(irq);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->chip->retrigger)
desc->chip->retrigger(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
}
}
Index: linux-2.6-tip/arch/xtensa/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/xtensa/kernel/irq.c
+++ linux-2.6-tip/arch/xtensa/kernel/irq.c
@@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
Index: linux-2.6-tip/include/linux/irq.h
===================================================================
--- linux-2.6-tip.orig/include/linux/irq.h
+++ linux-2.6-tip/include/linux/irq.h
@@ -192,7 +192,7 @@ struct irq_desc {
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
- spinlock_t lock;
+ raw_spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_var_t affinity;
unsigned int node;
Index: linux-2.6-tip/kernel/irq/autoprobe.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/autoprobe.c
+++ linux-2.6-tip/kernel/irq/autoprobe.c
@@ -45,7 +45,7 @@ unsigned long probe_irq_on(void)
* flush such a longstanding irq before considering it as spurious.
*/
for_each_irq_desc_reverse(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
/*
* An old-style architecture might still have
@@ -61,7 +61,7 @@ unsigned long probe_irq_on(void)
desc->chip->set_type(i, IRQ_TYPE_PROBE);
desc->chip->startup(i);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/* Wait for longstanding interrupts to trigger. */
@@ -73,13 +73,13 @@ unsigned long probe_irq_on(void)
* happened in the previous stage, it may have masked itself)
*/
for_each_irq_desc_reverse(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
if (desc->chip->startup(i))
desc->status |= IRQ_PENDING;
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/*
@@ -91,7 +91,7 @@ unsigned long probe_irq_on(void)
* Now filter out any obviously spurious interrupts
*/
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -103,7 +103,7 @@ unsigned long probe_irq_on(void)
if (i < 32)
mask |= 1 << i;
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
return mask;
@@ -129,7 +129,7 @@ unsigned int probe_irq_mask(unsigned lon
int i;
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -139,7 +139,7 @@ unsigned int probe_irq_mask(unsigned lon
desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
@@ -171,7 +171,7 @@ int probe_irq_off(unsigned long val)
unsigned int status;
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -183,7 +183,7 @@ int probe_irq_off(unsigned long val)
desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
Index: linux-2.6-tip/kernel/irq/chip.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/chip.c
+++ linux-2.6-tip/kernel/irq/chip.c
@@ -34,7 +34,7 @@ void dynamic_irq_init(unsigned int irq)
}
/* Ensure we don't have left over values from a previous use of this irq */
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status = IRQ_DISABLED;
desc->chip = &no_irq_chip;
desc->handle_irq = handle_bad_irq;
@@ -51,7 +51,7 @@ void dynamic_irq_init(unsigned int irq)
cpumask_clear(desc->pending_mask);
#endif
#endif
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
/**
@@ -68,9 +68,9 @@ void dynamic_irq_cleanup(unsigned int ir
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action) {
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
irq);
return;
@@ -82,7 +82,7 @@ void dynamic_irq_cleanup(unsigned int ir
desc->chip = &no_irq_chip;
desc->name = NULL;
clear_kstat_irqs(desc);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -104,10 +104,10 @@ int set_irq_chip(unsigned int irq, struc
if (!chip)
chip = &no_irq_chip;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
irq_chip_set_defaults(chip);
desc->chip = chip;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -133,9 +133,9 @@ int set_irq_type(unsigned int irq, unsig
if (type == IRQ_TYPE_NONE)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
ret = __irq_set_trigger(desc, irq, type);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
EXPORT_SYMBOL(set_irq_type);
@@ -158,9 +158,9 @@ int set_irq_data(unsigned int irq, void
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->handler_data = data;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
EXPORT_SYMBOL(set_irq_data);
@@ -183,11 +183,11 @@ int set_irq_msi(unsigned int irq, struct
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->msi_desc = entry;
if (entry)
entry->irq = irq;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -214,9 +214,9 @@ int set_irq_chip_data(unsigned int irq,
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->chip_data = data;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -241,12 +241,12 @@ void set_irq_nested_thread(unsigned int
if (!desc)
return;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (nest)
desc->status |= IRQ_NESTED_THREAD;
else
desc->status &= ~IRQ_NESTED_THREAD;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(set_irq_nested_thread);
@@ -343,7 +343,7 @@ void handle_nested_irq(unsigned int irq)
might_sleep();
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
kstat_incr_irqs_this_cpu(irq, desc);
@@ -352,17 +352,17 @@ void handle_nested_irq(unsigned int irq)
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
action_ret = action->thread_fn(action->irq, action->dev_id);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
@@ -384,7 +384,7 @@ handle_simple_irq(unsigned int irq, stru
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
@@ -396,16 +396,16 @@ handle_simple_irq(unsigned int irq, stru
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -424,7 +424,7 @@ handle_level_irq(unsigned int irq, struc
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
mask_ack_irq(desc, irq);
if (unlikely(desc->status & IRQ_INPROGRESS))
@@ -441,13 +441,13 @@ handle_level_irq(unsigned int irq, struc
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
if (unlikely(desc->status & IRQ_ONESHOT))
@@ -455,7 +455,7 @@ handle_level_irq(unsigned int irq, struc
else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(irq);
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
@@ -475,7 +475,7 @@ handle_fasteoi_irq(unsigned int irq, str
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out;
@@ -497,18 +497,18 @@ handle_fasteoi_irq(unsigned int irq, str
desc->status |= IRQ_INPROGRESS;
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out:
desc->chip->eoi(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -530,7 +530,7 @@ out:
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -576,17 +576,17 @@ handle_edge_irq(unsigned int irq, struct
}
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -643,7 +643,7 @@ __set_irq_handler(unsigned int irq, irq_
}
chip_bus_lock(irq, desc);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
/* Uninstall? */
if (handle == handle_bad_irq) {
@@ -661,7 +661,7 @@ __set_irq_handler(unsigned int irq, irq_
desc->depth = 0;
desc->chip->startup(irq);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);
@@ -692,9 +692,9 @@ void __init set_irq_noprobe(unsigned int
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_NOPROBE;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
void __init set_irq_probe(unsigned int irq)
@@ -707,7 +707,7 @@ void __init set_irq_probe(unsigned int i
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status &= ~IRQ_NOPROBE;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
Index: linux-2.6-tip/kernel/irq/handle.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/handle.c
+++ linux-2.6-tip/kernel/irq/handle.c
@@ -80,7 +80,7 @@ static struct irq_desc irq_desc_init = {
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
@@ -108,7 +108,7 @@ static void init_one_irq_desc(int irq, s
{
memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
- spin_lock_init(&desc->lock);
+ raw_spin_lock_init(&desc->lock);
desc->irq = irq;
#ifdef CONFIG_SMP
desc->node = node;
@@ -130,7 +130,7 @@ static void init_one_irq_desc(int irq, s
/*
* Protect the sparse_irqs:
*/
-DEFINE_SPINLOCK(sparse_irq_lock);
+DEFINE_RAW_SPINLOCK(sparse_irq_lock);
struct irq_desc **irq_desc_ptrs __read_mostly;
@@ -141,7 +141,7 @@ static struct irq_desc irq_desc_legacy[N
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
}
};
@@ -212,7 +212,7 @@ struct irq_desc * __ref irq_to_desc_allo
if (desc)
return desc;
- spin_lock_irqsave(&sparse_irq_lock, flags);
+ raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
@@ -234,7 +234,7 @@ struct irq_desc * __ref irq_to_desc_allo
irq_desc_ptrs[irq] = desc;
out_unlock:
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
return desc;
}
@@ -247,7 +247,7 @@ struct irq_desc irq_desc[NR_IRQS] __cach
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
}
};
@@ -473,7 +473,7 @@ unsigned int __do_IRQ(unsigned int irq)
return 1;
}
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->chip->ack)
desc->chip->ack(irq);
/*
@@ -517,13 +517,13 @@ unsigned int __do_IRQ(unsigned int irq)
for (;;) {
irqreturn_t action_ret;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
@@ -536,7 +536,7 @@ out:
* disabled while the handler was running.
*/
desc->chip->end(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return 1;
}
Index: linux-2.6-tip/kernel/irq/internals.h
===================================================================
--- linux-2.6-tip.orig/kernel/irq/internals.h
+++ linux-2.6-tip/kernel/irq/internals.h
@@ -18,7 +18,7 @@ extern void __enable_irq(struct irq_desc
extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
extern void clear_kstat_irqs(struct irq_desc *desc);
-extern spinlock_t sparse_irq_lock;
+extern raw_spinlock_t sparse_irq_lock;
#ifdef CONFIG_SPARSE_IRQ
/* irq_desc_ptrs allocated at boot time */
Index: linux-2.6-tip/kernel/irq/manage.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/manage.c
+++ linux-2.6-tip/kernel/irq/manage.c
@@ -46,9 +46,9 @@ void synchronize_irq(unsigned int irq)
cpu_relax();
/* Ok, that indicated we're done: double-check carefully. */
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
status = desc->status;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
/* Oops, that failed? */
} while (status & IRQ_INPROGRESS);
@@ -114,7 +114,7 @@ int irq_set_affinity(unsigned int irq, c
if (!desc->chip->set_affinity)
return -EINVAL;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PCNTXT) {
@@ -134,7 +134,7 @@ int irq_set_affinity(unsigned int irq, c
}
#endif
desc->status |= IRQ_AFFINITY_SET;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -181,11 +181,11 @@ int irq_select_affinity_usr(unsigned int
unsigned long flags;
int ret;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
ret = setup_affinity(irq, desc);
if (!ret)
irq_set_thread_affinity(desc);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
@@ -231,9 +231,9 @@ void disable_irq_nosync(unsigned int irq
return;
chip_bus_lock(irq, desc);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__disable_irq(desc, irq, false);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -308,9 +308,9 @@ void enable_irq(unsigned int irq)
return;
chip_bus_lock(irq, desc);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, false);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(enable_irq);
@@ -347,7 +347,7 @@ int set_irq_wake(unsigned int irq, unsig
/* wakeup-capable irqs can be shared between drivers that
* don't need to have the same sleep mode behaviors.
*/
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (on) {
if (desc->wake_depth++ == 0) {
ret = set_irq_wake_real(irq, on);
@@ -368,7 +368,7 @@ int set_irq_wake(unsigned int irq, unsig
}
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
EXPORT_SYMBOL(set_irq_wake);
@@ -484,12 +484,12 @@ static int irq_wait_for_interrupt(struct
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
chip_bus_lock(irq, desc);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
desc->status &= ~IRQ_MASKED;
desc->chip->unmask(irq);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
chip_bus_sync_unlock(irq, desc);
}
@@ -514,9 +514,9 @@ irq_thread_check_affinity(struct irq_des
return;
}
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
cpumask_copy(mask, desc->affinity);
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
set_cpus_allowed_ptr(current, mask);
free_cpumask_var(mask);
@@ -545,7 +545,7 @@ static int irq_thread(void *data)
atomic_inc(&desc->threads_active);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (unlikely(desc->status & IRQ_DISABLED)) {
/*
* CHECKME: We might need a dedicated
@@ -555,9 +555,9 @@ static int irq_thread(void *data)
* retriggers the interrupt itself --- tglx
*/
desc->status |= IRQ_PENDING;
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
} else {
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
action->thread_fn(action->irq, action->dev_id);
@@ -679,7 +679,7 @@ __setup_irq(unsigned int irq, struct irq
/*
* The following block of code has to be executed atomically
*/
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
old_ptr = &desc->action;
old = *old_ptr;
if (old) {
@@ -775,7 +775,7 @@ __setup_irq(unsigned int irq, struct irq
__enable_irq(desc, irq, false);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
/*
* Strictly no need to wake it up, but hung_task complains
@@ -802,7 +802,7 @@ mismatch:
ret = -EBUSY;
out_thread:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
if (new->thread) {
struct task_struct *t = new->thread;
@@ -844,7 +844,7 @@ static struct irqaction *__free_irq(unsi
if (!desc)
return NULL;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
/*
* There can be multiple actions per IRQ descriptor, find the right
@@ -856,7 +856,7 @@ static struct irqaction *__free_irq(unsi
if (!action) {
WARN(1, "Trying to free already-free IRQ %d\n", irq);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return NULL;
}
@@ -884,7 +884,7 @@ static struct irqaction *__free_irq(unsi
desc->chip->disable(irq);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
unregister_handler_proc(irq, action);
Index: linux-2.6-tip/kernel/irq/migration.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/migration.c
+++ linux-2.6-tip/kernel/irq/migration.c
@@ -27,7 +27,7 @@ void move_masked_irq(int irq)
if (!desc->chip->set_affinity)
return;
- assert_spin_locked(&desc->lock);
+ assert_raw_spin_locked(&desc->lock);
/*
* If there was a valid mask to work with, please
Index: linux-2.6-tip/kernel/irq/numa_migrate.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/numa_migrate.c
+++ linux-2.6-tip/kernel/irq/numa_migrate.c
@@ -42,7 +42,7 @@ static bool init_copy_one_irq_desc(int i
"for migration.\n", irq);
return false;
}
- spin_lock_init(&desc->lock);
+ raw_spin_lock_init(&desc->lock);
desc->node = node;
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
@@ -67,7 +67,7 @@ static struct irq_desc *__real_move_irq_
irq = old_desc->irq;
- spin_lock_irqsave(&sparse_irq_lock, flags);
+ raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
@@ -91,7 +91,7 @@ static struct irq_desc *__real_move_irq_
}
irq_desc_ptrs[irq] = desc;
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
/* free the old one */
free_one_irq_desc(old_desc, desc);
@@ -100,7 +100,7 @@ static struct irq_desc *__real_move_irq_
return desc;
out_unlock:
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
return desc;
}
Index: linux-2.6-tip/kernel/irq/pm.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/pm.c
+++ linux-2.6-tip/kernel/irq/pm.c
@@ -28,9 +28,9 @@ void suspend_device_irqs(void)
for_each_irq_desc(irq, desc) {
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__disable_irq(desc, irq, true);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
for_each_irq_desc(irq, desc)
@@ -56,9 +56,9 @@ void resume_device_irqs(void)
if (!(desc->status & IRQ_SUSPENDED))
continue;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, true);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
EXPORT_SYMBOL_GPL(resume_device_irqs);
Index: linux-2.6-tip/kernel/irq/proc.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/proc.c
+++ linux-2.6-tip/kernel/irq/proc.c
@@ -179,7 +179,7 @@ static int name_unique(unsigned int irq,
unsigned long flags;
int ret = 1;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for (action = desc->action ; action; action = action->next) {
if ((action != new_action) && action->name &&
!strcmp(new_action->name, action->name)) {
@@ -187,7 +187,7 @@ static int name_unique(unsigned int irq,
break;
}
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
Index: linux-2.6-tip/kernel/irq/spurious.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/spurious.c
+++ linux-2.6-tip/kernel/irq/spurious.c
@@ -28,7 +28,7 @@ static int try_one_irq(int irq, struct i
struct irqaction *action;
int ok = 0, work = 0;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
/* Already running on another processor */
if (desc->status & IRQ_INPROGRESS) {
/*
@@ -37,13 +37,13 @@ static int try_one_irq(int irq, struct i
*/
if (desc->action && (desc->action->flags & IRQF_SHARED))
desc->status |= IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return ok;
}
/* Honour the normal IRQ locking */
desc->status |= IRQ_INPROGRESS;
action = desc->action;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
while (action) {
/* Only shared IRQ handlers are safe to call */
@@ -56,7 +56,7 @@ static int try_one_irq(int irq, struct i
}
local_irq_disable();
/* Now clean up the flags */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
action = desc->action;
/*
@@ -68,9 +68,9 @@ static int try_one_irq(int irq, struct i
* Perform real IRQ processing for the IRQ we deferred
*/
work = 1;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
handle_IRQ_event(irq, action);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_PENDING;
}
desc->status &= ~IRQ_INPROGRESS;
@@ -80,7 +80,7 @@ static int try_one_irq(int irq, struct i
*/
if (work && desc->chip && desc->chip->end)
desc->chip->end(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return ok;
}
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 20/23] genirq: Convert irq_desc.lock to raw_spinlock
2009-12-06 18:03 ` [patch 20/23] genirq: Convert irq_desc.lock to raw_spinlock Thomas Gleixner
@ 2009-12-06 18:03 ` Thomas Gleixner
0 siblings, 0 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:03 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: irq-convert-to-core-locks.patch --]
[-- Type: text/plain, Size: 65584 bytes --]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
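
[ Note, not part of the patch: the changelog is empty; the change is the
  mechanical annotation of irq_desc->lock as a raw spinlock. The lock is
  taken from hard interrupt context (the flow handlers and __do_IRQ), so
  it can never be converted to a sleeping lock on PREEMPT_RT. Every call
  site simply switches to the raw_* API; on mainline both APIs behave the
  same.

  A minimal sketch of the call-site pattern, using a hypothetical helper --
  hypothetical_desc_user() is an illustration, not code from this patch:

	#include <linux/irq.h>

	static void hypothetical_desc_user(struct irq_desc *desc)
	{
		unsigned long flags;

		/* old: spin_lock_irqsave(&desc->lock, flags); */
		raw_spin_lock_irqsave(&desc->lock, flags);

		/* manipulate desc->status, desc->action, ... under the lock */

		/* old: spin_unlock_irqrestore(&desc->lock, flags); */
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

  Static initializers and definitions follow the same pattern:
  __SPIN_LOCK_UNLOCKED -> __RAW_SPIN_LOCK_UNLOCKED,
  spin_lock_init() -> raw_spin_lock_init(),
  DEFINE_SPINLOCK -> DEFINE_RAW_SPINLOCK. ]
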
arch/alpha/kernel/irq.c | 4 -
arch/arm/include/asm/mach/irq.h | 4 -
arch/arm/kernel/irq.c | 12 +--
arch/arm/mach-ns9xxx/irq.c | 8 +-
arch/avr32/kernel/irq.c | 4 -
arch/blackfin/kernel/irqchip.c | 6 -
arch/blackfin/kernel/traps.c | 4 -
arch/cris/kernel/irq.c | 4 -
arch/frv/kernel/irq.c | 4 -
arch/h8300/kernel/irq.c | 4 -
arch/ia64/kernel/iosapic.c | 6 -
arch/ia64/kernel/irq.c | 4 -
arch/ia64/kernel/irq_ia64.c | 4 -
arch/m32r/kernel/irq.c | 4 -
arch/microblaze/kernel/irq.c | 4 -
arch/mips/kernel/irq.c | 4 -
arch/mips/vr41xx/common/icu.c | 92 ++++++++++++++--------------
arch/mn10300/kernel/irq.c | 4 -
arch/parisc/kernel/irq.c | 4 -
arch/powerpc/kernel/irq.c | 8 +-
arch/powerpc/platforms/52xx/media5200.c | 8 +-
arch/powerpc/platforms/cell/interrupt.c | 8 +-
arch/powerpc/platforms/iseries/irq.c | 4 -
arch/powerpc/platforms/pseries/eeh_driver.c | 4 -
arch/powerpc/platforms/pseries/xics.c | 4 -
arch/powerpc/sysdev/fsl_msi.c | 4 -
arch/powerpc/sysdev/uic.c | 8 +-
arch/sh/kernel/irq.c | 4 -
arch/sparc/kernel/irq_64.c | 8 +-
arch/um/kernel/irq.c | 4 -
arch/x86/kernel/apic/io_apic.c | 4 -
arch/x86/kernel/irq.c | 14 ++--
arch/xtensa/kernel/irq.c | 4 -
include/linux/irq.h | 2
kernel/irq/autoprobe.c | 20 +++---
kernel/irq/chip.c | 86 +++++++++++++-------------
kernel/irq/handle.c | 22 +++---
kernel/irq/internals.h | 2
kernel/irq/manage.c | 50 +++++++--------
kernel/irq/migration.c | 2
kernel/irq/numa_migrate.c | 8 +-
kernel/irq/pm.c | 8 +-
kernel/irq/proc.c | 4 -
kernel/irq/spurious.c | 14 ++--
44 files changed, 242 insertions(+), 242 deletions(-)
Index: linux-2.6-tip/arch/alpha/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/alpha/kernel/irq.c
+++ linux-2.6-tip/arch/alpha/kernel/irq.c
@@ -81,7 +81,7 @@ show_interrupts(struct seq_file *p, void
#endif
if (irq < ACTUAL_NR_IRQS) {
- spin_lock_irqsave(&irq_desc[irq].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
action = irq_desc[irq].action;
if (!action)
goto unlock;
@@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
} else if (irq == ACTUAL_NR_IRQS) {
#ifdef CONFIG_SMP
seq_puts(p, "IPI: ");
Index: linux-2.6-tip/arch/arm/include/asm/mach/irq.h
===================================================================
--- linux-2.6-tip.orig/arch/arm/include/asm/mach/irq.h
+++ linux-2.6-tip/arch/arm/include/asm/mach/irq.h
@@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file
*/
#define do_bad_IRQ(irq,desc) \
do { \
- spin_lock(&desc->lock); \
+ raw_spin_lock(&desc->lock); \
handle_bad_irq(irq, desc); \
- spin_unlock(&desc->lock); \
+ raw_spin_unlock(&desc->lock); \
} while(0)
#endif
Index: linux-2.6-tip/arch/arm/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/arm/kernel/irq.c
+++ linux-2.6-tip/arch/arm/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
#ifdef CONFIG_FIQ
show_fiq_list(p, v);
@@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, uns
}
desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
if (iflags & IRQF_VALID)
desc->status &= ~IRQ_NOREQUEST;
@@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, uns
desc->status &= ~IRQ_NOPROBE;
if (!(iflags & IRQF_NOAUTOEN))
desc->status &= ~IRQ_NOAUTOEN;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
void __init init_IRQ(void)
@@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *d
{
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
desc->chip->set_affinity(irq, cpumask_of(cpu));
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/*
Index: linux-2.6-tip/arch/arm/mach-ns9xxx/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/arm/mach-ns9xxx/irq.c
+++ linux-2.6-tip/arch/arm/mach-ns9xxx/irq.c
@@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
BUG_ON(desc->status & IRQ_INPROGRESS);
@@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int
goto out_mask;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
@@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int
* Maybe this function should go to kernel/irq/chip.c? */
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
if (desc->status & IRQ_DISABLED)
@@ -97,7 +97,7 @@ out_mask:
/* ack unconditionally to unmask lower prio irqs */
desc->chip->ack(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
#define handle_irq handle_prio_irq
#endif
Index: linux-2.6-tip/arch/avr32/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/avr32/kernel/irq.c
+++ linux-2.6-tip/arch/avr32/kernel/irq.c
@@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -66,7 +66,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
Index: linux-2.6-tip/arch/blackfin/kernel/irqchip.c
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/kernel/irqchip.c
+++ linux-2.6-tip/arch/blackfin/kernel/irqchip.c
@@ -23,7 +23,7 @@ void ack_bad_irq(unsigned int irq)
static struct irq_desc bad_irq_desc = {
.handle_irq = handle_bad_irq,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
};
#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p,
unsigned long flags;
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
Index: linux-2.6-tip/arch/blackfin/kernel/traps.c
===================================================================
--- linux-2.6-tip.orig/arch/blackfin/kernel/traps.c
+++ linux-2.6-tip/arch/blackfin/kernel/traps.c
@@ -1140,7 +1140,7 @@ void show_regs(struct pt_regs *fp)
if (fp->ipend & ~0x3F) {
for (i = 0; i < (NR_IRQS - 1); i++) {
if (!in_atomic)
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
@@ -1155,7 +1155,7 @@ void show_regs(struct pt_regs *fp)
verbose_printk("\n");
unlock:
if (!in_atomic)
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
}
Index: linux-2.6-tip/arch/cris/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/cris/kernel/irq.c
+++ linux-2.6-tip/arch/cris/kernel/irq.c
@@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
Index: linux-2.6-tip/arch/frv/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/frv/kernel/irq.c
+++ linux-2.6-tip/arch/frv/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (action) {
seq_printf(p, "%3d: ", i);
@@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
}
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
}
Index: linux-2.6-tip/arch/h8300/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/h8300/kernel/irq.c
+++ linux-2.6-tip/arch/h8300/kernel/irq.c
@@ -186,7 +186,7 @@ int show_interrupts(struct seq_file *p,
seq_puts(p, " CPU0");
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -200,7 +200,7 @@ int show_interrupts(struct seq_file *p,
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
Index: linux-2.6-tip/arch/ia64/kernel/iosapic.c
===================================================================
--- linux-2.6-tip.orig/arch/ia64/kernel/iosapic.c
+++ linux-2.6-tip/arch/ia64/kernel/iosapic.c
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
goto unlock_iosapic_lock;
}
- spin_lock(&irq_desc[irq].lock);
+ raw_spin_lock(&irq_desc[irq].lock);
dest = get_target_cpu(gsi, irq);
dmode = choose_dmode();
err = register_intr(gsi, irq, dmode, polarity, trigger);
if (err < 0) {
- spin_unlock(&irq_desc[irq].lock);
+ raw_spin_unlock(&irq_desc[irq].lock);
irq = err;
goto unlock_iosapic_lock;
}
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, irq_to_vector(irq));
- spin_unlock(&irq_desc[irq].lock);
+ raw_spin_unlock(&irq_desc[irq].lock);
unlock_iosapic_lock:
spin_unlock_irqrestore(&iosapic_lock, flags);
return irq;
Index: linux-2.6-tip/arch/ia64/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/ia64/kernel/irq.c
+++ linux-2.6-tip/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS)
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
return 0;
Index: linux-2.6-tip/arch/ia64/kernel/irq_ia64.c
===================================================================
--- linux-2.6-tip.orig/arch/ia64/kernel/irq_ia64.c
+++ linux-2.6-tip/arch/ia64/kernel/irq_ia64.c
@@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_
desc = irq_desc + irq;
cfg = irq_cfg + irq;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (!cfg->move_cleanup_count)
goto unlock;
@@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_
spin_unlock_irqrestore(&vector_lock, flags);
cfg->move_cleanup_count--;
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
return IRQ_HANDLED;
}
Index: linux-2.6-tip/arch/m32r/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/m32r/kernel/irq.c
+++ linux-2.6-tip/arch/m32r/kernel/irq.c
@@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
Index: linux-2.6-tip/arch/microblaze/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/microblaze/kernel/irq.c
+++ linux-2.6-tip/arch/microblaze/kernel/irq.c
@@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < nr_irq) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
Index: linux-2.6-tip/arch/mips/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/mips/kernel/irq.c
+++ linux-2.6-tip/arch/mips/kernel/irq.c
@@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_putc(p, '\n');
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
Index: linux-2.6-tip/arch/mips/vr41xx/common/icu.c
===================================================================
--- linux-2.6-tip.orig/arch/mips/vr41xx/common/icu.c
+++ linux-2.6-tip/arch/mips/vr41xx/common/icu.c
@@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MPIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MPIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MAIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MAIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MKIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MKIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask)
struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MMACINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_macint);
@@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask
struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MMACINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_macint);
@@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask
struct irq_desc *desc = irq_desc + DSIU_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MDSIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_dsiuint);
@@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mas
struct irq_desc *desc = irq_desc + DSIU_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MDSIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_dsiuint);
@@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask)
struct irq_desc *desc = irq_desc + FIR_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_set(MFIRINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_firint);
@@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask
struct irq_desc *desc = irq_desc + FIR_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_clear(MFIRINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_firint);
@@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MPCIINTREG, PCIINT0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MPCIINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MSCUINTREG, SCUINT0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MSCUINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_set(MCSIINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_clear(MCSIINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MBCUINTREG, BCUINTR);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MBCUINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -486,7 +486,7 @@ static inline int set_sysint1_assign(uns
pin = SYSINT1_IRQ_TO_PIN(irq);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
intassign0 = icu1_read(INTASSIGN0);
intassign1 = icu1_read(INTASSIGN1);
@@ -525,7 +525,7 @@ static inline int set_sysint1_assign(uns
intassign1 |= (uint16_t)assign << 9;
break;
default:
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return -EINVAL;
}
@@ -533,7 +533,7 @@ static inline int set_sysint1_assign(uns
icu1_write(INTASSIGN0, intassign0);
icu1_write(INTASSIGN1, intassign1);
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return 0;
}
@@ -546,7 +546,7 @@ static inline int set_sysint2_assign(uns
pin = SYSINT2_IRQ_TO_PIN(irq);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
intassign2 = icu1_read(INTASSIGN2);
intassign3 = icu1_read(INTASSIGN3);
@@ -593,7 +593,7 @@ static inline int set_sysint2_assign(uns
intassign3 |= (uint16_t)assign << 12;
break;
default:
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return -EINVAL;
}
@@ -601,7 +601,7 @@ static inline int set_sysint2_assign(uns
icu1_write(INTASSIGN2, intassign2);
icu1_write(INTASSIGN3, intassign3);
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return 0;
}
Index: linux-2.6-tip/arch/mn10300/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/mn10300/kernel/irq.c
+++ linux-2.6-tip/arch/mn10300/kernel/irq.c
@@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p,
/* display information rows, one per active CPU */
case 1 ... NR_IRQS - 1:
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (action) {
@@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
}
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
break;
/* polish off with NMI and error counters */
Index: linux-2.6-tip/arch/parisc/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/parisc/kernel/irq.c
+++ linux-2.6-tip/arch/parisc/kernel/irq.c
@@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p,
if (i < NR_IRQS) {
struct irqaction *action;
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
Index: linux-2.6-tip/arch/powerpc/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/kernel/irq.c
+++ linux-2.6-tip/arch/powerpc/kernel/irq.c
@@ -193,7 +193,7 @@ int show_interrupts(struct seq_file *p,
if (i < NR_IRQS) {
desc = get_irq_desc(i);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
if (!action || !action->handler)
goto skip;
@@ -214,7 +214,7 @@ int show_interrupts(struct seq_file *p,
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
} else if (i == NR_IRQS) {
#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
if (tau_initialized){
@@ -1071,7 +1071,7 @@ static int virq_debug_show(struct seq_fi
for (i = 1; i < NR_IRQS; i++) {
desc = get_irq_desc(i);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action && desc->action->handler) {
seq_printf(m, "%5d ", i);
@@ -1090,7 +1090,7 @@ static int virq_debug_show(struct seq_fi
seq_printf(m, "%s\n", p);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
return 0;
Index: linux-2.6-tip/arch/powerpc/platforms/52xx/media5200.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/platforms/52xx/media5200.c
+++ linux-2.6-tip/arch/powerpc/platforms/52xx/media5200.c
@@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int
u32 status, enable;
/* Mask off the cascaded IRQ */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->chip->mask(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
/* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs
* are pending. 'ffs()' is 1 based */
@@ -104,11 +104,11 @@ void media5200_irq_cascade(unsigned int
}
/* Processing done; can reenable the cascade now */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->chip->ack(virq);
if (!(desc->status & IRQ_DISABLED))
desc->chip->unmask(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static int media5200_irq_map(struct irq_host *h, unsigned int virq,
Index: linux-2.6-tip/arch/powerpc/platforms/cell/interrupt.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/platforms/cell/interrupt.c
+++ linux-2.6-tip/arch/powerpc/platforms/cell/interrupt.c
@@ -237,7 +237,7 @@ extern int noirqdebug;
static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
{
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int
goto out_eoi;
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
desc->status &= ~IRQ_INPROGRESS;
out_eoi:
desc->chip->eoi(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static int iic_host_map(struct irq_host *h, unsigned int virq,
Index: linux-2.6-tip/arch/powerpc/platforms/iseries/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/platforms/iseries/irq.c
+++ linux-2.6-tip/arch/powerpc/platforms/iseries/irq.c
@@ -217,9 +217,9 @@ void __init iSeries_activate_IRQs()
struct irq_desc *desc = get_irq_desc(irq);
if (desc && desc->chip && desc->chip->startup) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->chip->startup(irq);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
}
Index: linux-2.6-tip/arch/powerpc/platforms/pseries/eeh_driver.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/platforms/pseries/eeh_driver.c
+++ linux-2.6-tip/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -72,10 +72,10 @@ static int irq_in_use(unsigned int irq)
unsigned long flags;
struct irq_desc *desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action)
rc = 1;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return rc;
}
Index: linux-2.6-tip/arch/powerpc/platforms/pseries/xics.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/platforms/pseries/xics.c
+++ linux-2.6-tip/arch/powerpc/platforms/pseries/xics.c
@@ -860,7 +860,7 @@ void xics_migrate_irqs_away(void)
|| desc->chip->set_affinity == NULL)
continue;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
if (status) {
@@ -884,7 +884,7 @@ void xics_migrate_irqs_away(void)
cpumask_setall(irq_desc[virq].affinity);
desc->chip->set_affinity(virq, cpu_all_mask);
unlock:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
#endif
Index: linux-2.6-tip/arch/powerpc/sysdev/fsl_msi.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/sysdev/fsl_msi.c
+++ linux-2.6-tip/arch/powerpc/sysdev/fsl_msi.c
@@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int
u32 intr_index;
u32 have_shift = 0;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
if (desc->chip->mask_ack)
desc->chip->mask_ack(irq);
@@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int
break;
}
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static int __devinit fsl_of_msi_probe(struct of_device *dev,
Index: linux-2.6-tip/arch/powerpc/sysdev/uic.c
===================================================================
--- linux-2.6-tip.orig/arch/powerpc/sysdev/uic.c
+++ linux-2.6-tip/arch/powerpc/sysdev/uic.c
@@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq,
int src;
int subvirq;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->status & IRQ_LEVEL)
desc->chip->mask(virq);
else
desc->chip->mask_ack(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
msr = mfdcr(uic->dcrbase + UIC_MSR);
if (!msr) /* spurious interrupt */
@@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq,
generic_handle_irq(subvirq);
uic_irq_ret:
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->status & IRQ_LEVEL)
desc->chip->ack(virq);
if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static struct uic * __init uic_init_one(struct device_node *node)
Index: linux-2.6-tip/arch/sh/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/sh/kernel/irq.c
+++ linux-2.6-tip/arch/sh/kernel/irq.c
@@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p,
if (!desc)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
@@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
out:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
#endif
Index: linux-2.6-tip/arch/sparc/kernel/irq_64.c
===================================================================
--- linux-2.6-tip.orig/arch/sparc/kernel/irq_64.c
+++ linux-2.6-tip/arch/sparc/kernel/irq_64.c
@@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
@@ -785,14 +785,14 @@ void fixup_irqs(void)
for (irq = 0; irq < NR_IRQS; irq++) {
unsigned long flags;
- spin_lock_irqsave(&irq_desc[irq].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
if (irq_desc[irq].action &&
!(irq_desc[irq].status & IRQ_PER_CPU)) {
if (irq_desc[irq].chip->set_affinity)
irq_desc[irq].chip->set_affinity(irq,
irq_desc[irq].affinity);
}
- spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
}
tick_ops->disable_irq();
Index: linux-2.6-tip/arch/um/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/um/kernel/irq.c
+++ linux-2.6-tip/arch/um/kernel/irq.c
@@ -34,7 +34,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS)
seq_putc(p, '\n');
Index: linux-2.6-tip/arch/x86/kernel/apic/io_apic.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/kernel/apic/io_apic.c
+++ linux-2.6-tip/arch/x86/kernel/apic/io_apic.c
@@ -2431,7 +2431,7 @@ asmlinkage void smp_irq_move_cleanup_int
continue;
cfg = irq_cfg(irq);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
goto unlock;
@@ -2450,7 +2450,7 @@ asmlinkage void smp_irq_move_cleanup_int
}
__get_cpu_var(vector_irq)[vector] = -1;
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
irq_exit();
Index: linux-2.6-tip/arch/x86/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/x86/kernel/irq.c
+++ linux-2.6-tip/arch/x86/kernel/irq.c
@@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p,
if (!desc)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
@@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
out:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -294,12 +294,12 @@ void fixup_irqs(void)
continue;
/* interrupt's are disabled at this point */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
affinity = desc->affinity;
if (!irq_has_action(irq) ||
cpumask_equal(affinity, cpu_online_mask)) {
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
continue;
}
@@ -326,7 +326,7 @@ void fixup_irqs(void)
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
desc->chip->unmask(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
if (break_affinity && set_affinity)
printk("Broke affinity for irq %i\n", irq);
@@ -356,10 +356,10 @@ void fixup_irqs(void)
irq = __get_cpu_var(vector_irq)[vector];
desc = irq_to_desc(irq);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->chip->retrigger)
desc->chip->retrigger(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
}
}
Index: linux-2.6-tip/arch/xtensa/kernel/irq.c
===================================================================
--- linux-2.6-tip.orig/arch/xtensa/kernel/irq.c
+++ linux-2.6-tip/arch/xtensa/kernel/irq.c
@@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p,
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p,
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
Index: linux-2.6-tip/include/linux/irq.h
===================================================================
--- linux-2.6-tip.orig/include/linux/irq.h
+++ linux-2.6-tip/include/linux/irq.h
@@ -192,7 +192,7 @@ struct irq_desc {
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
- spinlock_t lock;
+ raw_spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_var_t affinity;
unsigned int node;
Index: linux-2.6-tip/kernel/irq/autoprobe.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/autoprobe.c
+++ linux-2.6-tip/kernel/irq/autoprobe.c
@@ -45,7 +45,7 @@ unsigned long probe_irq_on(void)
* flush such a longstanding irq before considering it as spurious.
*/
for_each_irq_desc_reverse(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
/*
* An old-style architecture might still have
@@ -61,7 +61,7 @@ unsigned long probe_irq_on(void)
desc->chip->set_type(i, IRQ_TYPE_PROBE);
desc->chip->startup(i);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/* Wait for longstanding interrupts to trigger. */
@@ -73,13 +73,13 @@ unsigned long probe_irq_on(void)
* happened in the previous stage, it may have masked itself)
*/
for_each_irq_desc_reverse(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
if (desc->chip->startup(i))
desc->status |= IRQ_PENDING;
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/*
@@ -91,7 +91,7 @@ unsigned long probe_irq_on(void)
* Now filter out any obviously spurious interrupts
*/
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -103,7 +103,7 @@ unsigned long probe_irq_on(void)
if (i < 32)
mask |= 1 << i;
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
return mask;
@@ -129,7 +129,7 @@ unsigned int probe_irq_mask(unsigned lon
int i;
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -139,7 +139,7 @@ unsigned int probe_irq_mask(unsigned lon
desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
@@ -171,7 +171,7 @@ int probe_irq_off(unsigned long val)
unsigned int status;
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -183,7 +183,7 @@ int probe_irq_off(unsigned long val)
desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
Index: linux-2.6-tip/kernel/irq/chip.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/chip.c
+++ linux-2.6-tip/kernel/irq/chip.c
@@ -34,7 +34,7 @@ void dynamic_irq_init(unsigned int irq)
}
/* Ensure we don't have left over values from a previous use of this irq */
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status = IRQ_DISABLED;
desc->chip = &no_irq_chip;
desc->handle_irq = handle_bad_irq;
@@ -51,7 +51,7 @@ void dynamic_irq_init(unsigned int irq)
cpumask_clear(desc->pending_mask);
#endif
#endif
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
/**
@@ -68,9 +68,9 @@ void dynamic_irq_cleanup(unsigned int ir
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action) {
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
irq);
return;
@@ -82,7 +82,7 @@ void dynamic_irq_cleanup(unsigned int ir
desc->chip = &no_irq_chip;
desc->name = NULL;
clear_kstat_irqs(desc);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -104,10 +104,10 @@ int set_irq_chip(unsigned int irq, struc
if (!chip)
chip = &no_irq_chip;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
irq_chip_set_defaults(chip);
desc->chip = chip;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -133,9 +133,9 @@ int set_irq_type(unsigned int irq, unsig
if (type == IRQ_TYPE_NONE)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
ret = __irq_set_trigger(desc, irq, type);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
EXPORT_SYMBOL(set_irq_type);
@@ -158,9 +158,9 @@ int set_irq_data(unsigned int irq, void
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->handler_data = data;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
EXPORT_SYMBOL(set_irq_data);
@@ -183,11 +183,11 @@ int set_irq_msi(unsigned int irq, struct
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->msi_desc = entry;
if (entry)
entry->irq = irq;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -214,9 +214,9 @@ int set_irq_chip_data(unsigned int irq,
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->chip_data = data;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -241,12 +241,12 @@ void set_irq_nested_thread(unsigned int
if (!desc)
return;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (nest)
desc->status |= IRQ_NESTED_THREAD;
else
desc->status &= ~IRQ_NESTED_THREAD;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(set_irq_nested_thread);
@@ -343,7 +343,7 @@ void handle_nested_irq(unsigned int irq)
might_sleep();
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
kstat_incr_irqs_this_cpu(irq, desc);
@@ -352,17 +352,17 @@ void handle_nested_irq(unsigned int irq)
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
action_ret = action->thread_fn(action->irq, action->dev_id);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
@@ -384,7 +384,7 @@ handle_simple_irq(unsigned int irq, stru
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
@@ -396,16 +396,16 @@ handle_simple_irq(unsigned int irq, stru
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -424,7 +424,7 @@ handle_level_irq(unsigned int irq, struc
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
mask_ack_irq(desc, irq);
if (unlikely(desc->status & IRQ_INPROGRESS))
@@ -441,13 +441,13 @@ handle_level_irq(unsigned int irq, struc
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
if (unlikely(desc->status & IRQ_ONESHOT))
@@ -455,7 +455,7 @@ handle_level_irq(unsigned int irq, struc
else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(irq);
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
@@ -475,7 +475,7 @@ handle_fasteoi_irq(unsigned int irq, str
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out;
@@ -497,18 +497,18 @@ handle_fasteoi_irq(unsigned int irq, str
desc->status |= IRQ_INPROGRESS;
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out:
desc->chip->eoi(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -530,7 +530,7 @@ out:
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -576,17 +576,17 @@ handle_edge_irq(unsigned int irq, struct
}
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -643,7 +643,7 @@ __set_irq_handler(unsigned int irq, irq_
}
chip_bus_lock(irq, desc);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
/* Uninstall? */
if (handle == handle_bad_irq) {
@@ -661,7 +661,7 @@ __set_irq_handler(unsigned int irq, irq_
desc->depth = 0;
desc->chip->startup(irq);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);
@@ -692,9 +692,9 @@ void __init set_irq_noprobe(unsigned int
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_NOPROBE;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
void __init set_irq_probe(unsigned int irq)
@@ -707,7 +707,7 @@ void __init set_irq_probe(unsigned int i
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status &= ~IRQ_NOPROBE;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
Index: linux-2.6-tip/kernel/irq/handle.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/handle.c
+++ linux-2.6-tip/kernel/irq/handle.c
@@ -80,7 +80,7 @@ static struct irq_desc irq_desc_init = {
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
@@ -108,7 +108,7 @@ static void init_one_irq_desc(int irq, s
{
memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
- spin_lock_init(&desc->lock);
+ raw_spin_lock_init(&desc->lock);
desc->irq = irq;
#ifdef CONFIG_SMP
desc->node = node;
@@ -130,7 +130,7 @@ static void init_one_irq_desc(int irq, s
/*
* Protect the sparse_irqs:
*/
-DEFINE_SPINLOCK(sparse_irq_lock);
+DEFINE_RAW_SPINLOCK(sparse_irq_lock);
struct irq_desc **irq_desc_ptrs __read_mostly;
@@ -141,7 +141,7 @@ static struct irq_desc irq_desc_legacy[N
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
}
};
@@ -212,7 +212,7 @@ struct irq_desc * __ref irq_to_desc_allo
if (desc)
return desc;
- spin_lock_irqsave(&sparse_irq_lock, flags);
+ raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
@@ -234,7 +234,7 @@ struct irq_desc * __ref irq_to_desc_allo
irq_desc_ptrs[irq] = desc;
out_unlock:
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
return desc;
}
@@ -247,7 +247,7 @@ struct irq_desc irq_desc[NR_IRQS] __cach
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
}
};
@@ -473,7 +473,7 @@ unsigned int __do_IRQ(unsigned int irq)
return 1;
}
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->chip->ack)
desc->chip->ack(irq);
/*
@@ -517,13 +517,13 @@ unsigned int __do_IRQ(unsigned int irq)
for (;;) {
irqreturn_t action_ret;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
@@ -536,7 +536,7 @@ out:
* disabled while the handler was running.
*/
desc->chip->end(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return 1;
}
Index: linux-2.6-tip/kernel/irq/internals.h
===================================================================
--- linux-2.6-tip.orig/kernel/irq/internals.h
+++ linux-2.6-tip/kernel/irq/internals.h
@@ -18,7 +18,7 @@ extern void __enable_irq(struct irq_desc
extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
extern void clear_kstat_irqs(struct irq_desc *desc);
-extern spinlock_t sparse_irq_lock;
+extern raw_spinlock_t sparse_irq_lock;
#ifdef CONFIG_SPARSE_IRQ
/* irq_desc_ptrs allocated at boot time */
Index: linux-2.6-tip/kernel/irq/manage.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/manage.c
+++ linux-2.6-tip/kernel/irq/manage.c
@@ -46,9 +46,9 @@ void synchronize_irq(unsigned int irq)
cpu_relax();
/* Ok, that indicated we're done: double-check carefully. */
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
status = desc->status;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
/* Oops, that failed? */
} while (status & IRQ_INPROGRESS);
@@ -114,7 +114,7 @@ int irq_set_affinity(unsigned int irq, c
if (!desc->chip->set_affinity)
return -EINVAL;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PCNTXT) {
@@ -134,7 +134,7 @@ int irq_set_affinity(unsigned int irq, c
}
#endif
desc->status |= IRQ_AFFINITY_SET;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -181,11 +181,11 @@ int irq_select_affinity_usr(unsigned int
unsigned long flags;
int ret;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
ret = setup_affinity(irq, desc);
if (!ret)
irq_set_thread_affinity(desc);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
@@ -231,9 +231,9 @@ void disable_irq_nosync(unsigned int irq
return;
chip_bus_lock(irq, desc);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__disable_irq(desc, irq, false);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -308,9 +308,9 @@ void enable_irq(unsigned int irq)
return;
chip_bus_lock(irq, desc);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, false);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(enable_irq);
@@ -347,7 +347,7 @@ int set_irq_wake(unsigned int irq, unsig
/* wakeup-capable irqs can be shared between drivers that
* don't need to have the same sleep mode behaviors.
*/
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (on) {
if (desc->wake_depth++ == 0) {
ret = set_irq_wake_real(irq, on);
@@ -368,7 +368,7 @@ int set_irq_wake(unsigned int irq, unsig
}
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
EXPORT_SYMBOL(set_irq_wake);
@@ -484,12 +484,12 @@ static int irq_wait_for_interrupt(struct
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
chip_bus_lock(irq, desc);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
desc->status &= ~IRQ_MASKED;
desc->chip->unmask(irq);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
chip_bus_sync_unlock(irq, desc);
}
@@ -514,9 +514,9 @@ irq_thread_check_affinity(struct irq_des
return;
}
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
cpumask_copy(mask, desc->affinity);
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
set_cpus_allowed_ptr(current, mask);
free_cpumask_var(mask);
@@ -545,7 +545,7 @@ static int irq_thread(void *data)
atomic_inc(&desc->threads_active);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (unlikely(desc->status & IRQ_DISABLED)) {
/*
* CHECKME: We might need a dedicated
@@ -555,9 +555,9 @@ static int irq_thread(void *data)
* retriggers the interrupt itself --- tglx
*/
desc->status |= IRQ_PENDING;
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
} else {
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
action->thread_fn(action->irq, action->dev_id);
@@ -679,7 +679,7 @@ __setup_irq(unsigned int irq, struct irq
/*
* The following block of code has to be executed atomically
*/
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
old_ptr = &desc->action;
old = *old_ptr;
if (old) {
@@ -775,7 +775,7 @@ __setup_irq(unsigned int irq, struct irq
__enable_irq(desc, irq, false);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
/*
* Strictly no need to wake it up, but hung_task complains
@@ -802,7 +802,7 @@ mismatch:
ret = -EBUSY;
out_thread:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
if (new->thread) {
struct task_struct *t = new->thread;
@@ -844,7 +844,7 @@ static struct irqaction *__free_irq(unsi
if (!desc)
return NULL;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
/*
* There can be multiple actions per IRQ descriptor, find the right
@@ -856,7 +856,7 @@ static struct irqaction *__free_irq(unsi
if (!action) {
WARN(1, "Trying to free already-free IRQ %d\n", irq);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return NULL;
}
@@ -884,7 +884,7 @@ static struct irqaction *__free_irq(unsi
desc->chip->disable(irq);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
unregister_handler_proc(irq, action);
Index: linux-2.6-tip/kernel/irq/migration.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/migration.c
+++ linux-2.6-tip/kernel/irq/migration.c
@@ -27,7 +27,7 @@ void move_masked_irq(int irq)
if (!desc->chip->set_affinity)
return;
- assert_spin_locked(&desc->lock);
+ assert_raw_spin_locked(&desc->lock);
/*
* If there was a valid mask to work with, please
Index: linux-2.6-tip/kernel/irq/numa_migrate.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/numa_migrate.c
+++ linux-2.6-tip/kernel/irq/numa_migrate.c
@@ -42,7 +42,7 @@ static bool init_copy_one_irq_desc(int i
"for migration.\n", irq);
return false;
}
- spin_lock_init(&desc->lock);
+ raw_spin_lock_init(&desc->lock);
desc->node = node;
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
@@ -67,7 +67,7 @@ static struct irq_desc *__real_move_irq_
irq = old_desc->irq;
- spin_lock_irqsave(&sparse_irq_lock, flags);
+ raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
@@ -91,7 +91,7 @@ static struct irq_desc *__real_move_irq_
}
irq_desc_ptrs[irq] = desc;
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
/* free the old one */
free_one_irq_desc(old_desc, desc);
@@ -100,7 +100,7 @@ static struct irq_desc *__real_move_irq_
return desc;
out_unlock:
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
return desc;
}
Index: linux-2.6-tip/kernel/irq/pm.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/pm.c
+++ linux-2.6-tip/kernel/irq/pm.c
@@ -28,9 +28,9 @@ void suspend_device_irqs(void)
for_each_irq_desc(irq, desc) {
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__disable_irq(desc, irq, true);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
for_each_irq_desc(irq, desc)
@@ -56,9 +56,9 @@ void resume_device_irqs(void)
if (!(desc->status & IRQ_SUSPENDED))
continue;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, true);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
EXPORT_SYMBOL_GPL(resume_device_irqs);
Index: linux-2.6-tip/kernel/irq/proc.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/proc.c
+++ linux-2.6-tip/kernel/irq/proc.c
@@ -179,7 +179,7 @@ static int name_unique(unsigned int irq,
unsigned long flags;
int ret = 1;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for (action = desc->action ; action; action = action->next) {
if ((action != new_action) && action->name &&
!strcmp(new_action->name, action->name)) {
@@ -187,7 +187,7 @@ static int name_unique(unsigned int irq,
break;
}
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
Index: linux-2.6-tip/kernel/irq/spurious.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/spurious.c
+++ linux-2.6-tip/kernel/irq/spurious.c
@@ -28,7 +28,7 @@ static int try_one_irq(int irq, struct i
struct irqaction *action;
int ok = 0, work = 0;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
/* Already running on another processor */
if (desc->status & IRQ_INPROGRESS) {
/*
@@ -37,13 +37,13 @@ static int try_one_irq(int irq, struct i
*/
if (desc->action && (desc->action->flags & IRQF_SHARED))
desc->status |= IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return ok;
}
/* Honour the normal IRQ locking */
desc->status |= IRQ_INPROGRESS;
action = desc->action;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
while (action) {
/* Only shared IRQ handlers are safe to call */
@@ -56,7 +56,7 @@ static int try_one_irq(int irq, struct i
}
local_irq_disable();
/* Now clean up the flags */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
action = desc->action;
/*
@@ -68,9 +68,9 @@ static int try_one_irq(int irq, struct i
* Perform real IRQ processing for the IRQ we deferred
*/
work = 1;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
handle_IRQ_event(irq, action);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_PENDING;
}
desc->status &= ~IRQ_INPROGRESS;
@@ -80,7 +80,7 @@ static int try_one_irq(int irq, struct i
*/
if (work && desc->chip && desc->chip->end)
desc->chip->end(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return ok;
}
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 21/23] hrtimers: Convert to raw_spinlocks
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (19 preceding siblings ...)
2009-12-06 18:03 ` [patch 20/23] genirq: Convert irq_desc.lock to raw_spinlock Thomas Gleixner
@ 2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 22/23] perf_event: Convert to raw_spinlock Thomas Gleixner
2009-12-06 18:03 ` [patch 23/23] debugobjects: Convert to raw_spinlocks Thomas Gleixner
22 siblings, 0 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:03 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: timers-convert-to-core-locks.patch --]
[-- Type: text/plain, Size: 9038 bytes --]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/hrtimer.h | 2 -
kernel/hrtimer.c | 50 +++++++++++++++++++++++-----------------------
kernel/time/timer_list.c | 6 ++---
kernel/time/timer_stats.c | 14 ++++++------
4 files changed, 36 insertions(+), 36 deletions(-)
Index: linux-2.6-tip/include/linux/hrtimer.h
===================================================================
--- linux-2.6-tip.orig/include/linux/hrtimer.h
+++ linux-2.6-tip/include/linux/hrtimer.h
@@ -168,7 +168,7 @@ struct hrtimer_clock_base {
* @nr_events: Total number of timer interrupt events
*/
struct hrtimer_cpu_base {
- spinlock_t lock;
+ raw_spinlock_t lock;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
Index: linux-2.6-tip/kernel/hrtimer.c
===================================================================
--- linux-2.6-tip.orig/kernel/hrtimer.c
+++ linux-2.6-tip/kernel/hrtimer.c
@@ -127,11 +127,11 @@ struct hrtimer_clock_base *lock_hrtimer_
for (;;) {
base = timer->base;
if (likely(base != NULL)) {
- spin_lock_irqsave(&base->cpu_base->lock, *flags);
+ raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
if (likely(base == timer->base))
return base;
/* The timer has migrated to another CPU: */
- spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
+ raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
}
cpu_relax();
}
@@ -208,13 +208,13 @@ again:
/* See the comment in lock_timer_base() */
timer->base = NULL;
- spin_unlock(&base->cpu_base->lock);
- spin_lock(&new_base->cpu_base->lock);
+ raw_spin_unlock(&base->cpu_base->lock);
+ raw_spin_lock(&new_base->cpu_base->lock);
if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
cpu = this_cpu;
- spin_unlock(&new_base->cpu_base->lock);
- spin_lock(&base->cpu_base->lock);
+ raw_spin_unlock(&new_base->cpu_base->lock);
+ raw_spin_lock(&base->cpu_base->lock);
timer->base = base;
goto again;
}
@@ -230,7 +230,7 @@ lock_hrtimer_base(const struct hrtimer *
{
struct hrtimer_clock_base *base = timer->base;
- spin_lock_irqsave(&base->cpu_base->lock, *flags);
+ raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
return base;
}
@@ -619,12 +619,12 @@ static void retrigger_next_event(void *a
base = &__get_cpu_var(hrtimer_bases);
/* Adjust CLOCK_REALTIME offset */
- spin_lock(&base->lock);
+ raw_spin_lock(&base->lock);
base->clock_base[CLOCK_REALTIME].offset =
timespec_to_ktime(realtime_offset);
hrtimer_force_reprogram(base, 0);
- spin_unlock(&base->lock);
+ raw_spin_unlock(&base->lock);
}
/*
@@ -685,9 +685,9 @@ static inline int hrtimer_enqueue_reprog
{
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
if (wakeup) {
- spin_unlock(&base->cpu_base->lock);
+ raw_spin_unlock(&base->cpu_base->lock);
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- spin_lock(&base->cpu_base->lock);
+ raw_spin_lock(&base->cpu_base->lock);
} else
__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
@@ -765,7 +765,7 @@ void __timer_stats_hrtimer_set_start_inf
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
- spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
+ raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
@@ -1098,7 +1098,7 @@ ktime_t hrtimer_get_next_event(void)
unsigned long flags;
int i;
- spin_lock_irqsave(&cpu_base->lock, flags);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!hrtimer_hres_active()) {
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
@@ -1115,7 +1115,7 @@ ktime_t hrtimer_get_next_event(void)
}
}
- spin_unlock_irqrestore(&cpu_base->lock, flags);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
if (mindelta.tv64 < 0)
mindelta.tv64 = 0;
@@ -1197,11 +1197,11 @@ static void __run_hrtimer(struct hrtimer
* they get migrated to another cpu, therefore its safe to unlock
* the timer base.
*/
- spin_unlock(&cpu_base->lock);
+ raw_spin_unlock(&cpu_base->lock);
trace_hrtimer_expire_entry(timer, now);
restart = fn(timer);
trace_hrtimer_expire_exit(timer);
- spin_lock(&cpu_base->lock);
+ raw_spin_lock(&cpu_base->lock);
/*
* Note: We clear the CALLBACK bit after enqueue_hrtimer and
@@ -1265,7 +1265,7 @@ void hrtimer_interrupt(struct clock_even
expires_next.tv64 = KTIME_MAX;
- spin_lock(&cpu_base->lock);
+ raw_spin_lock(&cpu_base->lock);
/*
* We set expires_next to KTIME_MAX here with cpu_base->lock
* held to prevent that a timer is enqueued in our queue via
@@ -1321,7 +1321,7 @@ void hrtimer_interrupt(struct clock_even
* against it.
*/
cpu_base->expires_next = expires_next;
- spin_unlock(&cpu_base->lock);
+ raw_spin_unlock(&cpu_base->lock);
/* Reprogramming necessary ? */
if (expires_next.tv64 != KTIME_MAX) {
@@ -1423,7 +1423,7 @@ void hrtimer_run_queues(void)
gettime = 0;
}
- spin_lock(&cpu_base->lock);
+ raw_spin_lock(&cpu_base->lock);
while ((node = base->first)) {
struct hrtimer *timer;
@@ -1435,7 +1435,7 @@ void hrtimer_run_queues(void)
__run_hrtimer(timer, &base->softirq_time);
}
- spin_unlock(&cpu_base->lock);
+ raw_spin_unlock(&cpu_base->lock);
}
}
@@ -1591,7 +1591,7 @@ static void __cpuinit init_hrtimers_cpu(
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
- spin_lock_init(&cpu_base->lock);
+ raw_spin_lock_init(&cpu_base->lock);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1649,16 +1649,16 @@ static void migrate_hrtimers(int scpu)
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
- spin_lock(&new_base->lock);
- spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&new_base->lock);
+ raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
migrate_hrtimer_list(&old_base->clock_base[i],
&new_base->clock_base[i]);
}
- spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
+ raw_spin_unlock(&old_base->lock);
+ raw_spin_unlock(&new_base->lock);
/* Check, if we got expired work to do */
__hrtimer_peek_ahead_timers();
Index: linux-2.6-tip/kernel/time/timer_list.c
===================================================================
--- linux-2.6-tip.orig/kernel/time/timer_list.c
+++ linux-2.6-tip/kernel/time/timer_list.c
@@ -84,7 +84,7 @@ print_active_timers(struct seq_file *m,
next_one:
i = 0;
- spin_lock_irqsave(&base->cpu_base->lock, flags);
+ raw_spin_lock_irqsave(&base->cpu_base->lock, flags);
curr = base->first;
/*
@@ -100,13 +100,13 @@ next_one:
timer = rb_entry(curr, struct hrtimer, node);
tmp = *timer;
- spin_unlock_irqrestore(&base->cpu_base->lock, flags);
+ raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
print_timer(m, timer, &tmp, i, now);
next++;
goto next_one;
}
- spin_unlock_irqrestore(&base->cpu_base->lock, flags);
+ raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
}
static void
Index: linux-2.6-tip/kernel/time/timer_stats.c
===================================================================
--- linux-2.6-tip.orig/kernel/time/timer_stats.c
+++ linux-2.6-tip/kernel/time/timer_stats.c
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock);
/*
* Per-CPU lookup locks for fast hash lookup:
*/
-static DEFINE_PER_CPU(spinlock_t, lookup_lock);
+static DEFINE_PER_CPU(raw_spinlock_t, lookup_lock);
/*
* Mutex to serialize state changes with show-stats activities:
@@ -238,7 +238,7 @@ void timer_stats_update_stats(void *time
/*
* It doesnt matter which lock we take:
*/
- spinlock_t *lock;
+ raw_spinlock_t *lock;
struct entry *entry, input;
unsigned long flags;
@@ -253,7 +253,7 @@ void timer_stats_update_stats(void *time
input.pid = pid;
input.timer_flag = timer_flag;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
if (!timer_stats_active)
goto out_unlock;
@@ -264,7 +264,7 @@ void timer_stats_update_stats(void *time
atomic_inc(&overflow_count);
out_unlock:
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
static void print_name_offset(struct seq_file *m, unsigned long addr)
@@ -348,9 +348,9 @@ static void sync_access(void)
int cpu;
for_each_online_cpu(cpu) {
- spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
+ raw_spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
/* nothing */
- spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
+ raw_spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
}
}
@@ -408,7 +408,7 @@ void __init init_timer_stats(void)
int cpu;
for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu(lookup_lock, cpu));
+ raw_spin_lock_init(&per_cpu(lookup_lock, cpu));
}
static int __init init_tstats_procfs(void)
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 22/23] perf_event: Convert to raw_spinlock
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (20 preceding siblings ...)
2009-12-06 18:03 ` [patch 21/23] hrtimers: Convert to raw_spinlocks Thomas Gleixner
@ 2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 23/23] debugobjects: Convert to raw_spinlocks Thomas Gleixner
22 siblings, 1 reply; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:03 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: perf-convert-to-core-lock.patch --]
[-- Type: text/plain, Size: 12710 bytes --]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/perf_event.h | 2
kernel/hw_breakpoint.c | 4 -
kernel/perf_event.c | 106 ++++++++++++++++++++++-----------------------
3 files changed, 56 insertions(+), 56 deletions(-)
Index: linux-2.6-tip/include/linux/perf_event.h
===================================================================
--- linux-2.6-tip.orig/include/linux/perf_event.h
+++ linux-2.6-tip/include/linux/perf_event.h
@@ -685,7 +685,7 @@ struct perf_event_context {
* Protect the states of the events in the list,
* nr_active, and the list:
*/
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* Protect the list of events. Locking either mutex or lock
* is sufficient to ensure the list doesn't change; to change
Index: linux-2.6-tip/kernel/hw_breakpoint.c
===================================================================
--- linux-2.6-tip.orig/kernel/hw_breakpoint.c
+++ linux-2.6-tip/kernel/hw_breakpoint.c
@@ -130,7 +130,7 @@ static void toggle_bp_task_slot(struct t
list = &ctx->event_list;
- spin_lock_irqsave(&ctx->lock, flags);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
/*
* The current breakpoint counter is not included in the list
@@ -141,7 +141,7 @@ static void toggle_bp_task_slot(struct t
count++;
}
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
return;
Index: linux-2.6-tip/kernel/perf_event.c
===================================================================
--- linux-2.6-tip.orig/kernel/perf_event.c
+++ linux-2.6-tip/kernel/perf_event.c
@@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struc
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
- spin_lock_irqsave(&ctx->lock, *flags);
+ raw_spin_lock_irqsave(&ctx->lock, *flags);
if (ctx != rcu_dereference(task->perf_event_ctxp)) {
- spin_unlock_irqrestore(&ctx->lock, *flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
goto retry;
}
if (!atomic_inc_not_zero(&ctx->refcount)) {
- spin_unlock_irqrestore(&ctx->lock, *flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
ctx = NULL;
}
}
@@ -231,7 +231,7 @@ static struct perf_event_context *perf_p
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
++ctx->pin_count;
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
}
@@ -240,9 +240,9 @@ static void perf_unpin_context(struct pe
{
unsigned long flags;
- spin_lock_irqsave(&ctx->lock, flags);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
put_ctx(ctx);
}
@@ -427,7 +427,7 @@ static void __perf_event_remove_from_con
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
/*
* Protect the list operation against NMI by disabling the
* events on a global level.
@@ -449,7 +449,7 @@ static void __perf_event_remove_from_con
}
perf_enable();
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
@@ -488,12 +488,12 @@ retry:
task_oncpu_function_call(task, __perf_event_remove_from_context,
event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active we need to retry the smp call.
*/
if (ctx->nr_active && !list_empty(&event->group_entry)) {
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -504,7 +504,7 @@ retry:
*/
if (!list_empty(&event->group_entry))
list_del_event(event, ctx);
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
/*
@@ -535,7 +535,7 @@ static void __perf_event_disable(void *i
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
/*
* If the event is on, turn it off.
@@ -551,7 +551,7 @@ static void __perf_event_disable(void *i
event->state = PERF_EVENT_STATE_OFF;
}
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -584,12 +584,12 @@ static void perf_event_disable(struct pe
retry:
task_oncpu_function_call(task, __perf_event_disable, event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* If the event is still active, we need to retry the cross-call.
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -602,7 +602,7 @@ static void perf_event_disable(struct pe
event->state = PERF_EVENT_STATE_OFF;
}
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
static int
@@ -770,7 +770,7 @@ static void __perf_install_in_context(vo
cpuctx->task_ctx = ctx;
}
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);
@@ -820,7 +820,7 @@ static void __perf_install_in_context(vo
unlock:
perf_enable();
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -856,12 +856,12 @@ retry:
task_oncpu_function_call(task, __perf_install_in_context,
event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* we need to retry the smp call.
*/
if (ctx->is_active && list_empty(&event->group_entry)) {
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -872,7 +872,7 @@ retry:
*/
if (list_empty(&event->group_entry))
add_event_to_ctx(event, ctx);
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
/*
@@ -917,7 +917,7 @@ static void __perf_event_enable(void *in
cpuctx->task_ctx = ctx;
}
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);
@@ -959,7 +959,7 @@ static void __perf_event_enable(void *in
}
unlock:
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -985,7 +985,7 @@ static void perf_event_enable(struct per
return;
}
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto out;
@@ -1000,10 +1000,10 @@ static void perf_event_enable(struct per
event->state = PERF_EVENT_STATE_OFF;
retry:
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
task_oncpu_function_call(task, __perf_event_enable, event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active and the event is still off,
@@ -1020,7 +1020,7 @@ static void perf_event_enable(struct per
__perf_event_mark_enabled(event, ctx);
out:
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
static int perf_event_refresh(struct perf_event *event, int refresh)
@@ -1042,7 +1042,7 @@ void __perf_event_sched_out(struct perf_
{
struct perf_event *event;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 0;
if (likely(!ctx->nr_events))
goto out;
@@ -1055,7 +1055,7 @@ void __perf_event_sched_out(struct perf_
}
perf_enable();
out:
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -1193,8 +1193,8 @@ void perf_event_task_sched_out(struct ta
* order we take the locks because no other cpu could
* be trying to lock both of these tasks.
*/
- spin_lock(&ctx->lock);
- spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&ctx->lock);
+ raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
/*
* XXX do we need a memory barrier of sorts
@@ -1208,8 +1208,8 @@ void perf_event_task_sched_out(struct ta
perf_event_sync_stat(ctx, next_ctx);
}
- spin_unlock(&next_ctx->lock);
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&next_ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
rcu_read_unlock();
@@ -1251,7 +1251,7 @@ __perf_event_sched_in(struct perf_event_
struct perf_event *event;
int can_add_hw = 1;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
if (likely(!ctx->nr_events))
goto out;
@@ -1306,7 +1306,7 @@ __perf_event_sched_in(struct perf_event_
}
perf_enable();
out:
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -1370,7 +1370,7 @@ static void perf_ctx_adjust_freq(struct
struct hw_perf_event *hwc;
u64 interrupts, freq;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
@@ -1425,7 +1425,7 @@ static void perf_ctx_adjust_freq(struct
perf_enable();
}
}
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -1438,7 +1438,7 @@ static void rotate_ctx(struct perf_event
if (!ctx->nr_events)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
/*
* Rotate the first entry last (works just fine for group events too):
*/
@@ -1449,7 +1449,7 @@ static void rotate_ctx(struct perf_event
}
perf_enable();
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
void perf_event_task_tick(struct task_struct *curr, int cpu)
@@ -1498,7 +1498,7 @@ static void perf_event_enable_on_exec(st
__perf_event_task_sched_out(ctx);
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
list_for_each_entry(event, &ctx->group_list, group_entry) {
if (!event->attr.enable_on_exec)
@@ -1516,7 +1516,7 @@ static void perf_event_enable_on_exec(st
if (enabled)
unclone_ctx(ctx);
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
perf_event_task_sched_in(task, smp_processor_id());
out:
@@ -1542,10 +1542,10 @@ static void __perf_event_read(void *info
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
update_context_time(ctx);
update_event_times(event);
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
event->pmu->read(event);
}
@@ -1563,10 +1563,10 @@ static u64 perf_event_read(struct perf_e
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
- spin_lock_irqsave(&ctx->lock, flags);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
update_context_time(ctx);
update_event_times(event);
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return atomic64_read(&event->count);
@@ -1580,7 +1580,7 @@ __perf_event_init_context(struct perf_ev
struct task_struct *task)
{
memset(ctx, 0, sizeof(*ctx));
- spin_lock_init(&ctx->lock);
+ raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->group_list);
INIT_LIST_HEAD(&ctx->event_list);
@@ -1650,7 +1650,7 @@ static struct perf_event_context *find_g
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
unclone_ctx(ctx);
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
if (!ctx) {
@@ -1988,7 +1988,7 @@ static int perf_event_period(struct perf
if (!value)
return -EINVAL;
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
if (event->attr.freq) {
if (value > sysctl_perf_event_sample_rate) {
ret = -EINVAL;
@@ -2001,7 +2001,7 @@ static int perf_event_period(struct perf
event->hw.sample_period = value;
}
unlock:
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
return ret;
}
@@ -4998,7 +4998,7 @@ void perf_event_exit_task(struct task_st
* reading child->perf_event_ctxp, we wait until it has
* incremented the context's refcount before we do put_ctx below.
*/
- spin_lock(&child_ctx->lock);
+ raw_spin_lock(&child_ctx->lock);
child->perf_event_ctxp = NULL;
/*
* If this context is a clone; unclone it so it can't get
@@ -5007,7 +5007,7 @@ void perf_event_exit_task(struct task_st
*/
unclone_ctx(child_ctx);
update_context_time(child_ctx);
- spin_unlock_irqrestore(&child_ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
* Report the task dead after unscheduling the events so that we
@@ -5291,11 +5291,11 @@ perf_set_reserve_percpu(struct sysdev_cl
perf_reserved_percpu = val;
for_each_online_cpu(cpu) {
cpuctx = &per_cpu(perf_cpu_context, cpu);
- spin_lock_irq(&cpuctx->ctx.lock);
+ raw_spin_lock_irq(&cpuctx->ctx.lock);
mpt = min(perf_max_events - cpuctx->ctx.nr_events,
perf_max_events - perf_reserved_percpu);
cpuctx->max_pertask = mpt;
- spin_unlock_irq(&cpuctx->ctx.lock);
+ raw_spin_unlock_irq(&cpuctx->ctx.lock);
}
spin_unlock(&perf_resource_lock);
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 22/23] perf_event: Convert to raw_spinlock
2009-12-06 18:03 ` [patch 22/23] perf_event: Convert to raw_spinlock Thomas Gleixner
@ 2009-12-06 18:03 ` Thomas Gleixner
0 siblings, 0 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:03 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: perf-convert-to-core-lock.patch --]
[-- Type: text/plain, Size: 12712 bytes --]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/perf_event.h | 2
kernel/hw_breakpoint.c | 4 -
kernel/perf_event.c | 106 ++++++++++++++++++++++-----------------------
3 files changed, 56 insertions(+), 56 deletions(-)
Index: linux-2.6-tip/include/linux/perf_event.h
===================================================================
--- linux-2.6-tip.orig/include/linux/perf_event.h
+++ linux-2.6-tip/include/linux/perf_event.h
@@ -685,7 +685,7 @@ struct perf_event_context {
* Protect the states of the events in the list,
* nr_active, and the list:
*/
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* Protect the list of events. Locking either mutex or lock
* is sufficient to ensure the list doesn't change; to change
Index: linux-2.6-tip/kernel/hw_breakpoint.c
===================================================================
--- linux-2.6-tip.orig/kernel/hw_breakpoint.c
+++ linux-2.6-tip/kernel/hw_breakpoint.c
@@ -130,7 +130,7 @@ static void toggle_bp_task_slot(struct t
list = &ctx->event_list;
- spin_lock_irqsave(&ctx->lock, flags);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
/*
* The current breakpoint counter is not included in the list
@@ -141,7 +141,7 @@ static void toggle_bp_task_slot(struct t
count++;
}
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
return;
Index: linux-2.6-tip/kernel/perf_event.c
===================================================================
--- linux-2.6-tip.orig/kernel/perf_event.c
+++ linux-2.6-tip/kernel/perf_event.c
@@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struc
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
- spin_lock_irqsave(&ctx->lock, *flags);
+ raw_spin_lock_irqsave(&ctx->lock, *flags);
if (ctx != rcu_dereference(task->perf_event_ctxp)) {
- spin_unlock_irqrestore(&ctx->lock, *flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
goto retry;
}
if (!atomic_inc_not_zero(&ctx->refcount)) {
- spin_unlock_irqrestore(&ctx->lock, *flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
ctx = NULL;
}
}
@@ -231,7 +231,7 @@ static struct perf_event_context *perf_p
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
++ctx->pin_count;
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
}
@@ -240,9 +240,9 @@ static void perf_unpin_context(struct pe
{
unsigned long flags;
- spin_lock_irqsave(&ctx->lock, flags);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
put_ctx(ctx);
}
@@ -427,7 +427,7 @@ static void __perf_event_remove_from_con
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
/*
* Protect the list operation against NMI by disabling the
* events on a global level.
@@ -449,7 +449,7 @@ static void __perf_event_remove_from_con
}
perf_enable();
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
@@ -488,12 +488,12 @@ retry:
task_oncpu_function_call(task, __perf_event_remove_from_context,
event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active we need to retry the smp call.
*/
if (ctx->nr_active && !list_empty(&event->group_entry)) {
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -504,7 +504,7 @@ retry:
*/
if (!list_empty(&event->group_entry))
list_del_event(event, ctx);
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
/*
@@ -535,7 +535,7 @@ static void __perf_event_disable(void *i
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
/*
* If the event is on, turn it off.
@@ -551,7 +551,7 @@ static void __perf_event_disable(void *i
event->state = PERF_EVENT_STATE_OFF;
}
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -584,12 +584,12 @@ static void perf_event_disable(struct pe
retry:
task_oncpu_function_call(task, __perf_event_disable, event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* If the event is still active, we need to retry the cross-call.
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -602,7 +602,7 @@ static void perf_event_disable(struct pe
event->state = PERF_EVENT_STATE_OFF;
}
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
static int
@@ -770,7 +770,7 @@ static void __perf_install_in_context(vo
cpuctx->task_ctx = ctx;
}
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);
@@ -820,7 +820,7 @@ static void __perf_install_in_context(vo
unlock:
perf_enable();
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -856,12 +856,12 @@ retry:
task_oncpu_function_call(task, __perf_install_in_context,
event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* we need to retry the smp call.
*/
if (ctx->is_active && list_empty(&event->group_entry)) {
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -872,7 +872,7 @@ retry:
*/
if (list_empty(&event->group_entry))
add_event_to_ctx(event, ctx);
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
/*
@@ -917,7 +917,7 @@ static void __perf_event_enable(void *in
cpuctx->task_ctx = ctx;
}
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);
@@ -959,7 +959,7 @@ static void __perf_event_enable(void *in
}
unlock:
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -985,7 +985,7 @@ static void perf_event_enable(struct per
return;
}
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto out;
@@ -1000,10 +1000,10 @@ static void perf_event_enable(struct per
event->state = PERF_EVENT_STATE_OFF;
retry:
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
task_oncpu_function_call(task, __perf_event_enable, event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active and the event is still off,
@@ -1020,7 +1020,7 @@ static void perf_event_enable(struct per
__perf_event_mark_enabled(event, ctx);
out:
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
static int perf_event_refresh(struct perf_event *event, int refresh)
@@ -1042,7 +1042,7 @@ void __perf_event_sched_out(struct perf_
{
struct perf_event *event;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 0;
if (likely(!ctx->nr_events))
goto out;
@@ -1055,7 +1055,7 @@ void __perf_event_sched_out(struct perf_
}
perf_enable();
out:
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -1193,8 +1193,8 @@ void perf_event_task_sched_out(struct ta
* order we take the locks because no other cpu could
* be trying to lock both of these tasks.
*/
- spin_lock(&ctx->lock);
- spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&ctx->lock);
+ raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
/*
* XXX do we need a memory barrier of sorts
@@ -1208,8 +1208,8 @@ void perf_event_task_sched_out(struct ta
perf_event_sync_stat(ctx, next_ctx);
}
- spin_unlock(&next_ctx->lock);
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&next_ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
rcu_read_unlock();
@@ -1251,7 +1251,7 @@ __perf_event_sched_in(struct perf_event_
struct perf_event *event;
int can_add_hw = 1;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
if (likely(!ctx->nr_events))
goto out;
@@ -1306,7 +1306,7 @@ __perf_event_sched_in(struct perf_event_
}
perf_enable();
out:
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -1370,7 +1370,7 @@ static void perf_ctx_adjust_freq(struct
struct hw_perf_event *hwc;
u64 interrupts, freq;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
@@ -1425,7 +1425,7 @@ static void perf_ctx_adjust_freq(struct
perf_enable();
}
}
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -1438,7 +1438,7 @@ static void rotate_ctx(struct perf_event
if (!ctx->nr_events)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
/*
* Rotate the first entry last (works just fine for group events too):
*/
@@ -1449,7 +1449,7 @@ static void rotate_ctx(struct perf_event
}
perf_enable();
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
void perf_event_task_tick(struct task_struct *curr, int cpu)
@@ -1498,7 +1498,7 @@ static void perf_event_enable_on_exec(st
__perf_event_task_sched_out(ctx);
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
list_for_each_entry(event, &ctx->group_list, group_entry) {
if (!event->attr.enable_on_exec)
@@ -1516,7 +1516,7 @@ static void perf_event_enable_on_exec(st
if (enabled)
unclone_ctx(ctx);
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
perf_event_task_sched_in(task, smp_processor_id());
out:
@@ -1542,10 +1542,10 @@ static void __perf_event_read(void *info
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
update_context_time(ctx);
update_event_times(event);
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
event->pmu->read(event);
}
@@ -1563,10 +1563,10 @@ static u64 perf_event_read(struct perf_e
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
- spin_lock_irqsave(&ctx->lock, flags);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
update_context_time(ctx);
update_event_times(event);
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return atomic64_read(&event->count);
@@ -1580,7 +1580,7 @@ __perf_event_init_context(struct perf_ev
struct task_struct *task)
{
memset(ctx, 0, sizeof(*ctx));
- spin_lock_init(&ctx->lock);
+ raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->group_list);
INIT_LIST_HEAD(&ctx->event_list);
@@ -1650,7 +1650,7 @@ static struct perf_event_context *find_g
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
unclone_ctx(ctx);
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
if (!ctx) {
@@ -1988,7 +1988,7 @@ static int perf_event_period(struct perf
if (!value)
return -EINVAL;
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
if (event->attr.freq) {
if (value > sysctl_perf_event_sample_rate) {
ret = -EINVAL;
@@ -2001,7 +2001,7 @@ static int perf_event_period(struct perf
event->hw.sample_period = value;
}
unlock:
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
return ret;
}
@@ -4998,7 +4998,7 @@ void perf_event_exit_task(struct task_st
* reading child->perf_event_ctxp, we wait until it has
* incremented the context's refcount before we do put_ctx below.
*/
- spin_lock(&child_ctx->lock);
+ raw_spin_lock(&child_ctx->lock);
child->perf_event_ctxp = NULL;
/*
* If this context is a clone; unclone it so it can't get
@@ -5007,7 +5007,7 @@ void perf_event_exit_task(struct task_st
*/
unclone_ctx(child_ctx);
update_context_time(child_ctx);
- spin_unlock_irqrestore(&child_ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
* Report the task dead after unscheduling the events so that we
@@ -5291,11 +5291,11 @@ perf_set_reserve_percpu(struct sysdev_cl
perf_reserved_percpu = val;
for_each_online_cpu(cpu) {
cpuctx = &per_cpu(perf_cpu_context, cpu);
- spin_lock_irq(&cpuctx->ctx.lock);
+ raw_spin_lock_irq(&cpuctx->ctx.lock);
mpt = min(perf_max_events - cpuctx->ctx.nr_events,
perf_max_events - perf_reserved_percpu);
cpuctx->max_pertask = mpt;
- spin_unlock_irq(&cpuctx->ctx.lock);
+ raw_spin_unlock_irq(&cpuctx->ctx.lock);
}
spin_unlock(&perf_resource_lock);
^ permalink raw reply [flat|nested] 42+ messages in thread
* [patch 23/23] debugobjects: Convert to raw_spinlocks
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
` (21 preceding siblings ...)
2009-12-06 18:03 ` [patch 22/23] perf_event: Convert to raw_spinlock Thomas Gleixner
@ 2009-12-06 18:03 ` Thomas Gleixner
22 siblings, 0 replies; 42+ messages in thread
From: Thomas Gleixner @ 2009-12-06 18:03 UTC (permalink / raw)
To: LKML; +Cc: Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
[-- Attachment #1: debugobjects-convert-to-core-locks.patch --]
[-- Type: text/plain, Size: 8995 bytes --]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
lib/debugobjects.c | 74 ++++++++++++++++++++++++++---------------------------
1 file changed, 37 insertions(+), 37 deletions(-)
Index: linux-2.6-tip/lib/debugobjects.c
===================================================================
--- linux-2.6-tip.orig/lib/debugobjects.c
+++ linux-2.6-tip/lib/debugobjects.c
@@ -26,14 +26,14 @@
struct debug_bucket {
struct hlist_head list;
- spinlock_t lock;
+ raw_spinlock_t lock;
};
static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
-static DEFINE_SPINLOCK(pool_lock);
+static DEFINE_RAW_SPINLOCK(pool_lock);
static HLIST_HEAD(obj_pool);
@@ -96,10 +96,10 @@ static int fill_pool(void)
if (!new)
return obj_pool_free;
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
obj_pool_free++;
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
}
return obj_pool_free;
}
@@ -133,7 +133,7 @@ alloc_object(void *addr, struct debug_bu
{
struct debug_obj *obj = NULL;
- spin_lock(&pool_lock);
+ raw_spin_lock(&pool_lock);
if (obj_pool.first) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -152,7 +152,7 @@ alloc_object(void *addr, struct debug_bu
if (obj_pool_free < obj_pool_min_free)
obj_pool_min_free = obj_pool_free;
}
- spin_unlock(&pool_lock);
+ raw_spin_unlock(&pool_lock);
return obj;
}
@@ -165,7 +165,7 @@ static void free_obj_work(struct work_st
struct debug_obj *obj;
unsigned long flags;
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
while (obj_pool_free > ODEBUG_POOL_SIZE) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
hlist_del(&obj->node);
@@ -174,11 +174,11 @@ static void free_obj_work(struct work_st
* We release pool_lock across kmem_cache_free() to
* avoid contention on pool_lock.
*/
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
kmem_cache_free(obj_cache, obj);
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
}
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
@@ -190,7 +190,7 @@ static void free_object(struct debug_obj
unsigned long flags;
int sched = 0;
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
/*
* schedule work when the pool is filled and the cache is
* initialized:
@@ -200,7 +200,7 @@ static void free_object(struct debug_obj
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
if (sched)
schedule_work(&debug_obj_work);
}
@@ -221,9 +221,9 @@ static void debug_objects_oom(void)
printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
hlist_move_list(&db->list, &freelist);
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -303,14 +303,14 @@ __debug_object_init(void *addr, struct d
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj) {
obj = alloc_object(addr, db, descr);
if (!obj) {
debug_objects_enabled = 0;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_objects_oom();
return;
}
@@ -327,7 +327,7 @@ __debug_object_init(void *addr, struct d
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "init");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_init, addr, state);
return;
@@ -338,7 +338,7 @@ __debug_object_init(void *addr, struct d
break;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -385,7 +385,7 @@ void debug_object_activate(void *addr, s
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
@@ -398,7 +398,7 @@ void debug_object_activate(void *addr, s
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "activate");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_activate, addr, state);
return;
@@ -408,11 +408,11 @@ void debug_object_activate(void *addr, s
default:
break;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
return;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
/*
* This happens when a static object is activated. We
* let the type specific code decide whether this is
@@ -438,7 +438,7 @@ void debug_object_deactivate(void *addr,
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
@@ -463,7 +463,7 @@ void debug_object_deactivate(void *addr,
debug_print_object(&o, "deactivate");
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -483,7 +483,7 @@ void debug_object_destroy(void *addr, st
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj)
@@ -498,7 +498,7 @@ void debug_object_destroy(void *addr, st
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "destroy");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_destroy, addr, state);
return;
@@ -509,7 +509,7 @@ void debug_object_destroy(void *addr, st
break;
}
out_unlock:
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -529,7 +529,7 @@ void debug_object_free(void *addr, struc
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj)
@@ -539,17 +539,17 @@ void debug_object_free(void *addr, struc
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "free");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_free, addr, state);
return;
default:
hlist_del(&obj->node);
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
return;
}
out_unlock:
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
#ifdef CONFIG_DEBUG_OBJECTS_FREE
@@ -575,7 +575,7 @@ static void __debug_check_no_obj_freed(c
repeat:
cnt = 0;
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
cnt++;
oaddr = (unsigned long) obj->object;
@@ -587,7 +587,7 @@ repeat:
debug_print_object(obj, "free");
descr = obj->descr;
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_free,
(void *) oaddr, state);
goto repeat;
@@ -597,7 +597,7 @@ repeat:
break;
}
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -783,7 +783,7 @@ check_results(void *addr, enum debug_obj
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj && state != ODEBUG_STATE_NONE) {
@@ -807,7 +807,7 @@ check_results(void *addr, enum debug_obj
}
res = 0;
out:
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
if (res)
debug_objects_enabled = 0;
return res;
@@ -907,7 +907,7 @@ void __init debug_objects_early_init(voi
int i;
for (i = 0; i < ODEBUG_HASH_SIZE; i++)
- spin_lock_init(&obj_hash[i].lock);
+ raw_spin_lock_init(&obj_hash[i].lock);
for (i = 0; i < ODEBUG_POOL_SIZE; i++)
hlist_add_head(&obj_static_pool[i].node, &obj_pool);
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [patch 04/23] locking: Convert raw_spinlock to arch_spinlock
2009-12-06 18:02 ` [patch 04/23] locking: Convert raw_spinlock to arch_spinlock Thomas Gleixner
@ 2009-12-06 18:10 ` Linus Torvalds
2009-12-06 18:19 ` Ingo Molnar
0 siblings, 1 reply; 42+ messages in thread
From: Linus Torvalds @ 2009-12-06 18:10 UTC (permalink / raw)
To: Thomas Gleixner
Cc: LKML, Andrew Morton, Ingo Molnar, Peter Zijlstra, Linux-Arch
On Sun, 6 Dec 2009, Thomas Gleixner wrote:
>
> Linus suggested to convert the raw_ to arch_ locks and cleanup the
> name space instead of using an artificial name like core_spin,
> atomic_spin or whatever
>
> No functional change.
Ok, I'm obviously biased, since I suggested this as a possible solution to
the naming wars, but I have to say that I like this patch regardless of
any of the other patches in the series. IOW, even without any issue of
then re-using 'raw_spinlock' for the non-preemptable one, I like how this
kind of change JustMakesSense(tm):
> --- linux-2.6-tip.orig/arch/alpha/include/asm/spinlock_types.h
> +++ linux-2.6-tip/arch/alpha/include/asm/spinlock_types.h
> @@ -7,7 +7,7 @@
>
> typedef struct {
> volatile unsigned int lock;
> -} raw_spinlock_t;
> +} arch_spinlock_t;
>
> #define __RAW_SPIN_LOCK_UNLOCKED { 0 }
>
ie I just think that even just looking at the patch, this kind of change
simply makes sense. The architectures declare their own per-architecture
"arch_spinlock", and we can then do whatever we want at a higher level
around that notion.
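As a rough illustration of that layering (a simplified sketch, not the
exact definitions from the series), the stacking ends up looking
something like this:

	/* Lowest level: each architecture supplies its own type,
	 * e.g. the alpha one quoted above. */
	typedef struct {
		volatile unsigned int lock;
	} arch_spinlock_t;

	/* Generic wrapper around the arch type; this is the lock
	 * that must always spin, even on -rt. */
	typedef struct raw_spinlock {
		arch_spinlock_t raw_lock;
		/* plus dep_map/debug fields when configured */
	} raw_spinlock_t;

	/* spinlock_t wraps raw_spinlock today; on -rt it can be
	 * switched to a sleeping lock without the architectures
	 * noticing. */
	typedef struct spinlock {
		struct raw_spinlock rlock;
	} spinlock_t;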
That said, it _is_
51 files changed, 164 insertions(+), 164 deletions(-)
but it seems to really be mainly the obvious arch header files and the
tracing infrastructure, so while it's 51 files, the impact seems to be
reasonably well-contained.
So I like it, but maybe the arch people hate it?
Linus
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [patch 04/23] locking: Convert raw_spinlock to arch_spinlock
2009-12-06 18:10 ` Linus Torvalds
@ 2009-12-06 18:19 ` Ingo Molnar
0 siblings, 0 replies; 42+ messages in thread
From: Ingo Molnar @ 2009-12-06 18:19 UTC (permalink / raw)
To: Linus Torvalds
Cc: Thomas Gleixner, LKML, Andrew Morton, Peter Zijlstra, Linux-Arch
* Linus Torvalds <torvalds@linux-foundation.org> wrote:
> So I like it, but maybe the arch people hate it?
I like it too - but i'm even more biased i guess ;-)
Ingo
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [patch 13/23] plist: Make plist debugging raw_spinlock aware
2009-12-06 18:02 ` [patch 13/23] plist: Make plist debugging raw_spinlock aware Thomas Gleixner
2009-12-06 18:02 ` Thomas Gleixner
@ 2009-12-07 1:21 ` Frederic Weisbecker
1 sibling, 0 replies; 42+ messages in thread
From: Frederic Weisbecker @ 2009-12-07 1:21 UTC (permalink / raw)
To: Thomas Gleixner
Cc: LKML, Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
On Sun, Dec 06, 2009 at 06:02:57PM -0000, Thomas Gleixner wrote:
> plists are used with spinlocks and raw_spinlocks. Change the plist
> debugging to handle both types.
>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---
> include/linux/plist.h | 43 +++++++++++++++++++++++++++++++++++++------
> kernel/futex.c | 6 +++---
> lib/plist.c | 8 +++++---
> 3 files changed, 45 insertions(+), 12 deletions(-)
>
> Index: linux-2.6-tip/include/linux/plist.h
> ===================================================================
> --- linux-2.6-tip.orig/include/linux/plist.h
> +++ linux-2.6-tip/include/linux/plist.h
> @@ -81,7 +81,8 @@ struct plist_head {
> struct list_head prio_list;
> struct list_head node_list;
> #ifdef CONFIG_DEBUG_PI_LIST
> - spinlock_t *lock;
> + raw_spinlock_t *rawlock;
> + spinlock_t *spinlock;
> #endif
> };
This could be a union probably, as we don't use both at the
same time?
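Something like this untested sketch, i.e. keep both pointers but
overlap their storage, since only one of them is ever set:

	struct plist_head {
		struct list_head prio_list;
		struct list_head node_list;
	#ifdef CONFIG_DEBUG_PI_LIST
		union {
			raw_spinlock_t	*rawlock;
			spinlock_t	*spinlock;
		};
	#endif
	};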
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [patch 02/23] locking: Split rwlock from spinlock headers
2009-12-06 18:01 ` [patch 02/23] locking: Split rwlock from spinlock headers Thomas Gleixner
@ 2009-12-07 17:28 ` Arnd Bergmann
0 siblings, 0 replies; 42+ messages in thread
From: Arnd Bergmann @ 2009-12-07 17:28 UTC (permalink / raw)
To: Thomas Gleixner
Cc: LKML, Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
On Sunday 06 December 2009, Thomas Gleixner wrote:
> +
> +#ifndef __LINUX_SPINLOCK_H
> +# error "please don't include this file directly"
> +#endif
> +
Hmm, why not? I think it would be nice to have rwlock.h self-contained
to the degree that users of rwlock_t but not spinlock_t only need to
include the rwlock header and vice versa.
Obviously, we'd have to clean up all the ~200 users of rwlock_t before
we're able to remove rwlock.h from spinlock.h, but the other direction
could be done easily.
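For the rwlock-only direction, a self-contained header could then look
roughly like this (hypothetical layout, just to show the shape):

	/* include/linux/rwlock.h */
	#ifndef __LINUX_RWLOCK_H
	#define __LINUX_RWLOCK_H

	#include <linux/rwlock_types.h>	/* rwlock_t itself */
	#include <linux/preempt.h>
	#include <linux/lockdep.h>

	/* rwlock API declarations go here, usable without having
	 * included spinlock.h first. */

	#endif /* __LINUX_RWLOCK_H */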
Arnd <><
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [patch 09/23] locking: Implement new raw_spinlock
2009-12-06 18:02 ` [patch 09/23] locking: Implement new raw_spinlock Thomas Gleixner
@ 2009-12-08 13:33 ` Yong Zhang
0 siblings, 0 replies; 42+ messages in thread
From: Yong Zhang @ 2009-12-08 13:33 UTC (permalink / raw)
To: Thomas Gleixner
Cc: LKML, Linus Torvalds, Andrew Morton, Ingo Molnar, Peter Zijlstra,
Linux-Arch
On Mon, Dec 7, 2009 at 2:02 AM, Thomas Gleixner <tglx@linutronix.de> wrote:
> Now that the raw_spin name space is freed up, we can implement
> raw_spinlock and the related functions which are used to annotate the
> locks which are not converted to sleeping spinlocks in preempt-rt.
>
> A side effect is that only such locks can be used with the low level
> lock fsunctions which circumvent lockdep.
s/fsunctions/functions
^ permalink raw reply [flat|nested] 42+ messages in thread
end of thread, other threads:[~2009-12-08 13:33 UTC | newest]
Thread overview: 42+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-12-06 18:01 [patch 00/23] locking: name space cleanup and -rt spinlock annotation Thomas Gleixner
2009-12-06 18:01 ` [patch 01/23] locking: Reorder functions in spinlock.c Thomas Gleixner
2009-12-06 18:01 ` Thomas Gleixner
2009-12-06 18:01 ` [patch 02/23] locking: Split rwlock from spinlock headers Thomas Gleixner
2009-12-07 17:28 ` Arnd Bergmann
2009-12-06 18:02 ` [patch 03/23] locking: Separate rwlock api from spinlock api Thomas Gleixner
2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` [patch 04/23] locking: Convert raw_spinlock to arch_spinlock Thomas Gleixner
2009-12-06 18:10 ` Linus Torvalds
2009-12-06 18:19 ` Ingo Molnar
2009-12-06 18:02 ` [patch 05/23] locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED Thomas Gleixner
2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` [patch 06/23] locking: Convert __raw_spin* functions to arch_spin* Thomas Gleixner
2009-12-06 18:02 ` [patch 07/23] locking: Convert raw_rwlock to arch_rwlock Thomas Gleixner
2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` [patch 08/23] locking: Convert raw_rwlock functions " Thomas Gleixner
2009-12-06 18:02 ` Thomas Gleixner
2009-12-06 18:02 ` [patch 09/23] locking: Implement new raw_spinlock Thomas Gleixner
2009-12-08 13:33 ` Yong Zhang
2009-12-06 18:02 ` [patch 10/23] locking: Further name space cleanups Thomas Gleixner
2009-12-06 18:02 ` [patch 11/23] locking: Cleanup the name space completely Thomas Gleixner
2009-12-06 18:02 ` [patch 12/23] bkl: Fixup core_lock fallout Thomas Gleixner
2009-12-06 18:02 ` [patch 13/23] plist: Make plist debugging raw_spinlock aware Thomas Gleixner
2009-12-06 18:02 ` Thomas Gleixner
2009-12-07 1:21 ` Frederic Weisbecker
2009-12-06 18:03 ` [patch 14/23] sched: Convert rq->lock to raw_spinlock Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 15/23] sched: Convert rt_runtime_lock " Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 16/23] sched: Convert cpupri lock " Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 17/23] sched: Convert pi_lock " Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 18/23] rtmutes: Convert rtmutex.lock " Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 19/23] smp: Convert smplocks to raw_spinlocks Thomas Gleixner
2009-12-06 18:03 ` [patch 20/23] genirq: Convert irq_desc.lock to raw_spinlock Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 21/23] hrtimers: Convert to raw_spinlocks Thomas Gleixner
2009-12-06 18:03 ` [patch 22/23] perf_event: Convert to raw_spinlock Thomas Gleixner
2009-12-06 18:03 ` Thomas Gleixner
2009-12-06 18:03 ` [patch 23/23] debugobjects: Convert to raw_spinlocks Thomas Gleixner