From: Heiko Carstens <heiko.carstens@de.ibm.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Linus Torvalds <torvalds@linux-foundation.org>,
Peter Zijlstra <a.p.zijlstra@chello.nl>,
Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org,
Martin Schwidefsky <schwidefsky@de.ibm.com>,
Heiko Carstens <heiko.carstens@de.ibm.com>
Subject: [patch 2/4] spinlock: add macro to generate out-of-line variants
Date: Tue, 11 Aug 2009 14:47:58 +0200 [thread overview]
Message-ID: <20090811124912.494596235@de.ibm.com> (raw)
In-Reply-To: 20090811124756.873490673@de.ibm.com
[-- Attachment #1: 02_spinlock_macros.diff --]
[-- Type: text/plain, Size: 7639 bytes --]
From: Heiko Carstens <heiko.carstens@de.ibm.com>
Since the bodies of the spinlock functions are in a header
file most functions in spinlock.c look like this:
int __lockfunc _spin_trylock(spinlock_t *lock)
{
return __spin_trylock(lock);
}
EXPORT_SYMBOL(_spin_trylock);
That's just a simple wrapper. It's the same for spin-,
read- and write-lock. So add an extra macro and generate
all versions automatically like it is already done for
the preemption friendly locks.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
---
kernel/spinlock.c | 233 +++++++++++++++---------------------------------------
1 file changed, 68 insertions(+), 165 deletions(-)
Index: linux-2.6/kernel/spinlock.c
===================================================================
--- linux-2.6.orig/kernel/spinlock.c
+++ linux-2.6/kernel/spinlock.c
@@ -21,23 +21,37 @@
#include <linux/debug_locks.h>
#include <linux/module.h>
-int __lockfunc _spin_trylock(spinlock_t *lock)
-{
- return __spin_trylock(lock);
-}
-EXPORT_SYMBOL(_spin_trylock);
-
-int __lockfunc _read_trylock(rwlock_t *lock)
-{
- return __read_trylock(lock);
-}
-EXPORT_SYMBOL(_read_trylock);
-
-int __lockfunc _write_trylock(rwlock_t *lock)
-{
- return __write_trylock(lock);
-}
-EXPORT_SYMBOL(_write_trylock);
+#define BUILD_LOCK_OPS_COMMON(op, locktype) \
+int __lockfunc _##op##_trylock(locktype##_t *lock) \
+{ \
+ return __##op##_trylock(lock); \
+} \
+EXPORT_SYMBOL(_##op##_trylock); \
+ \
+void __lockfunc _##op##_unlock(locktype##_t *lock) \
+{ \
+ __##op##_unlock(lock); \
+} \
+EXPORT_SYMBOL(_##op##_unlock); \
+ \
+void __lockfunc _##op##_unlock_irq(locktype##_t *lock) \
+{ \
+ __##op##_unlock_irq(lock); \
+} \
+EXPORT_SYMBOL(_##op##_unlock_irq); \
+ \
+void __lockfunc _##op##_unlock_bh(locktype##_t *lock) \
+{ \
+ __##op##_unlock_bh(lock); \
+} \
+EXPORT_SYMBOL(_##op##_unlock_bh); \
+ \
+void __lockfunc _##op##_unlock_irqrestore(locktype##_t *lock, \
+ unsigned long flags) \
+{ \
+ __##op##_unlock_irqrestore(lock, flags); \
+} \
+EXPORT_SYMBOL(_##op##_unlock_irqrestore)
/*
* If lockdep is enabled then we use the non-preemption spin-ops
@@ -46,77 +60,30 @@ EXPORT_SYMBOL(_write_trylock);
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-void __lockfunc _read_lock(rwlock_t *lock)
-{
- __read_lock(lock);
-}
-EXPORT_SYMBOL(_read_lock);
-
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
-{
- return __spin_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_spin_lock_irqsave);
-
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
-{
- __spin_lock_irq(lock);
-}
-EXPORT_SYMBOL(_spin_lock_irq);
-
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
-{
- __spin_lock_bh(lock);
-}
-EXPORT_SYMBOL(_spin_lock_bh);
-
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
-{
- return __read_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_read_lock_irqsave);
-
-void __lockfunc _read_lock_irq(rwlock_t *lock)
-{
- __read_lock_irq(lock);
-}
-EXPORT_SYMBOL(_read_lock_irq);
-
-void __lockfunc _read_lock_bh(rwlock_t *lock)
-{
- __read_lock_bh(lock);
-}
-EXPORT_SYMBOL(_read_lock_bh);
-
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
-{
- return __write_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_write_lock_irqsave);
-
-void __lockfunc _write_lock_irq(rwlock_t *lock)
-{
- __write_lock_irq(lock);
-}
-EXPORT_SYMBOL(_write_lock_irq);
-
-void __lockfunc _write_lock_bh(rwlock_t *lock)
-{
- __write_lock_bh(lock);
-}
-EXPORT_SYMBOL(_write_lock_bh);
-
-void __lockfunc _spin_lock(spinlock_t *lock)
-{
- __spin_lock(lock);
-}
-EXPORT_SYMBOL(_spin_lock);
-
-void __lockfunc _write_lock(rwlock_t *lock)
-{
- __write_lock(lock);
-}
-EXPORT_SYMBOL(_write_lock);
+#define BUILD_LOCK_OPS_DEP(op, locktype) \
+void __lockfunc _##op##_lock(locktype##_t *lock) \
+{ \
+ __##op##_lock(lock); \
+} \
+EXPORT_SYMBOL(_##op##_lock); \
+ \
+unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
+{ \
+ return __##op##_lock_irqsave(lock); \
+} \
+EXPORT_SYMBOL(_##op##_lock_irqsave); \
+ \
+void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
+{ \
+ __##op##_lock_irq(lock); \
+} \
+EXPORT_SYMBOL(_##op##_lock_irq); \
+ \
+void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
+{ \
+ __##op##_lock_bh(lock); \
+} \
+EXPORT_SYMBOL(_##op##_lock_bh)
#else /* CONFIG_PREEMPT: */
@@ -128,7 +95,7 @@ EXPORT_SYMBOL(_write_lock);
* (We do this in a function because inlining it would be excessive.)
*/
-#define BUILD_LOCK_OPS(op, locktype) \
+#define BUILD_LOCK_OPS_DEP(op, locktype) \
void __lockfunc _##op##_lock(locktype##_t *lock) \
{ \
for (;;) { \
@@ -193,21 +160,29 @@ void __lockfunc _##op##_lock_bh(locktype
\
EXPORT_SYMBOL(_##op##_lock_bh)
+#endif /* CONFIG_PREEMPT */
+
+#define BUILD_LOCK_OPS(op, locktype) \
+ BUILD_LOCK_OPS_COMMON(op, locktype); \
+ BUILD_LOCK_OPS_DEP(op, locktype);
+
/*
- * Build preemption-friendly versions of the following
- * lock-spinning functions:
+ * Build versions of the following lock-spinning functions:
*
* _[spin|read|write]_lock()
* _[spin|read|write]_lock_irq()
* _[spin|read|write]_lock_irqsave()
* _[spin|read|write]_lock_bh()
+ * _[spin|read|write]_trylock()
+ * _[spin|read|write]_unlock()
+ * _[spin|read|write]_unlock_irq()
+ * _[spin|read|write]_unlock_irqrestore()
+ * _[spin|read|write]_unlock_bh()
*/
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
-#endif /* CONFIG_PREEMPT */
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
@@ -242,78 +217,6 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
#endif
-void __lockfunc _spin_unlock(spinlock_t *lock)
-{
- __spin_unlock(lock);
-}
-EXPORT_SYMBOL(_spin_unlock);
-
-void __lockfunc _write_unlock(rwlock_t *lock)
-{
- __write_unlock(lock);
-}
-EXPORT_SYMBOL(_write_unlock);
-
-void __lockfunc _read_unlock(rwlock_t *lock)
-{
- __read_unlock(lock);
-}
-EXPORT_SYMBOL(_read_unlock);
-
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
-{
- __spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(_spin_unlock_irqrestore);
-
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)
-{
- __spin_unlock_irq(lock);
-}
-EXPORT_SYMBOL(_spin_unlock_irq);
-
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)
-{
- __spin_unlock_bh(lock);
-}
-EXPORT_SYMBOL(_spin_unlock_bh);
-
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
- __read_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(_read_unlock_irqrestore);
-
-void __lockfunc _read_unlock_irq(rwlock_t *lock)
-{
- __read_unlock_irq(lock);
-}
-EXPORT_SYMBOL(_read_unlock_irq);
-
-void __lockfunc _read_unlock_bh(rwlock_t *lock)
-{
- __read_unlock_bh(lock);
-}
-EXPORT_SYMBOL(_read_unlock_bh);
-
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
- __write_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(_write_unlock_irqrestore);
-
-void __lockfunc _write_unlock_irq(rwlock_t *lock)
-{
- __write_unlock_irq(lock);
-}
-EXPORT_SYMBOL(_write_unlock_irq);
-
-void __lockfunc _write_unlock_bh(rwlock_t *lock)
-{
- __write_unlock_bh(lock);
-}
-EXPORT_SYMBOL(_write_unlock_bh);
-
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
return __spin_trylock_bh(lock);
--
next prev parent reply other threads:[~2009-08-11 12:49 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2009-08-11 12:47 [patch 0/4] Allow inlined spinlocks again V2 Heiko Carstens
2009-08-11 12:47 ` [patch 1/4] spinlock: move spinlock function bodies to header file Heiko Carstens
2009-08-11 12:47 ` Heiko Carstens [this message]
2009-08-11 13:25 ` [patch 2/4] spinlock: add macro to generate out-of-line variants Arnd Bergmann
2009-08-11 13:35 ` Peter Zijlstra
2009-08-11 16:56 ` Heiko Carstens
2009-08-11 12:47 ` [patch 3/4] spinlock: allow inlined spinlocks Heiko Carstens
2009-08-11 12:48 ` [patch 4/4] spinlock: allow inline spinlocks on s390 Heiko Carstens
2009-08-11 13:00 ` [patch 0/4] Allow inlined spinlocks again V2 Peter Zijlstra
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20090811124912.494596235@de.ibm.com \
--to=heiko.carstens@de.ibm.com \
--cc=a.p.zijlstra@chello.nl \
--cc=akpm@linux-foundation.org \
--cc=linux-arch@vger.kernel.org \
--cc=mingo@elte.hu \
--cc=schwidefsky@de.ibm.com \
--cc=torvalds@linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox