From: Heiko Carstens <heiko.carstens@de.ibm.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Ingo Molnar <mingo@elte.hu>,
Linus Torvalds <torvalds@linux-foundation.org>,
David Miller <davem@davemloft.net>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paul Mackerras <paulus@samba.org>,
	Geert Uytterhoeven <geert@linux-m68k.org>,
	Roman Zippel <zippel@linux-m68k.org>
Cc: linux-arch@vger.kernel.org,
Peter Zijlstra <a.p.zijlstra@chello.nl>,
Arnd Bergmann <arnd@arndb.de>,
Nick Piggin <nickpiggin@yahoo.com.au>,
Martin Schwidefsky <schwidefsky@de.ibm.com>,
Horst Hartmann <horsth@linux.vnet.ibm.com>,
Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>,
Heiko Carstens <heiko.carstens@de.ibm.com>
Subject: [patch 7/8] spinlock: allow inlined spinlocks
Date: Sat, 29 Aug 2009 12:21:22 +0200
Message-ID: <20090829102158.211845784@de.ibm.com>
In-Reply-To: <20090829102115.638224800@de.ibm.com>
[-- Attachment #1: 02_spinlock_ifdef.diff --]
[-- Type: text/plain, Size: 9197 bytes --]
From: Heiko Carstens <heiko.carstens@de.ibm.com>
This allows an architecture to specify, per lock variant, whether the
locking code should be kept out-of-line or inlined.
If an architecture wants out-of-line locking code, no change is
needed. To force inlining of e.g. spin_lock(), the line
#define __spin_lock_is_small
needs to be added to arch/<whatever>/include/asm/spinlock.h.
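As a minimal sketch (hypothetical: which variants an architecture marks
is entirely up to it), an architecture whose lock operations compile
down to a handful of instructions could add:

	/* arch/<whatever>/include/asm/spinlock.h */
	#define __spin_lock_is_small
	#define __spin_unlock_is_small
	#define __spin_trylock_is_small

Only the variants marked this way are inlined; all others keep their
out-of-line copies in kernel/spinlock.c.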
If CONFIG_DEBUG_SPINLOCK or CONFIG_GENERIC_LOCKBREAK is defined, the
per-architecture defines are (partly) ignored and out-of-line spinlock
code is still generated.
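To illustrate the mechanism (a sketch, assuming the usual SMP mapping
of spin_lock() to _spin_lock() in include/linux/spinlock.h): with
__spin_lock_is_small defined, the call resolves entirely in the
preprocessor:

	spin_lock(lock)
	  -> _spin_lock(lock)	/* macro from spinlock_api_smp.h */
	  -> __spin_lock(lock)	/* inline body from the previous patch */

Without the define, _spin_lock() remains the exported out-of-line
function in kernel/spinlock.c, whose definition is guarded there by
#ifndef _spin_lock.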
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
---
include/linux/spinlock_api_smp.h |  119 +++++++++++++++++++++++++++++++++++++++
kernel/spinlock.c                |   56 ++++++++++++++++++
2 files changed, 175 insertions(+)
Index: linux-2.6/kernel/spinlock.c
===================================================================
--- linux-2.6.orig/kernel/spinlock.c
+++ linux-2.6/kernel/spinlock.c
@@ -21,23 +21,29 @@
#include <linux/debug_locks.h>
#include <linux/module.h>
+#ifndef _spin_trylock
int __lockfunc _spin_trylock(spinlock_t *lock)
{
return __spin_trylock(lock);
}
EXPORT_SYMBOL(_spin_trylock);
+#endif
+#ifndef _read_trylock
int __lockfunc _read_trylock(rwlock_t *lock)
{
return __read_trylock(lock);
}
EXPORT_SYMBOL(_read_trylock);
+#endif
+#ifndef _write_trylock
int __lockfunc _write_trylock(rwlock_t *lock)
{
return __write_trylock(lock);
}
EXPORT_SYMBOL(_write_trylock);
+#endif
/*
* If lockdep is enabled then we use the non-preemption spin-ops
@@ -46,77 +52,101 @@ EXPORT_SYMBOL(_write_trylock);
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#ifndef _read_lock
void __lockfunc _read_lock(rwlock_t *lock)
{
__read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
+#endif
+#ifndef _spin_lock_irqsave
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
return __spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_spin_lock_irqsave);
+#endif
+#ifndef _spin_lock_irq
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
__spin_lock_irq(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
+#endif
+#ifndef _spin_lock_bh
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
__spin_lock_bh(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
+#endif
+#ifndef _read_lock_irqsave
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
return __read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_read_lock_irqsave);
+#endif
+#ifndef _read_lock_irq
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
__read_lock_irq(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
+#endif
+#ifndef _read_lock_bh
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
__read_lock_bh(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
+#endif
+#ifndef _write_lock_irqsave
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
return __write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_write_lock_irqsave);
+#endif
+#ifndef _write_lock_irq
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
__write_lock_irq(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
+#endif
+#ifndef _write_lock_bh
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
__write_lock_bh(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
+#endif
+#ifndef _spin_lock
void __lockfunc _spin_lock(spinlock_t *lock)
{
__spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);
+#endif
+#ifndef _write_lock
void __lockfunc _write_lock(rwlock_t *lock)
{
__write_lock(lock);
}
EXPORT_SYMBOL(_write_lock);
+#endif
#else /* CONFIG_PREEMPT: */
@@ -242,83 +272,109 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
#endif
+#ifndef _spin_unlock
void __lockfunc _spin_unlock(spinlock_t *lock)
{
__spin_unlock(lock);
}
EXPORT_SYMBOL(_spin_unlock);
+#endif
+#ifndef _write_unlock
void __lockfunc _write_unlock(rwlock_t *lock)
{
__write_unlock(lock);
}
EXPORT_SYMBOL(_write_unlock);
+#endif
+#ifndef _read_unlock
void __lockfunc _read_unlock(rwlock_t *lock)
{
__read_unlock(lock);
}
EXPORT_SYMBOL(_read_unlock);
+#endif
+#ifndef _spin_unlock_irqrestore
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
__spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
+#endif
+#ifndef _spin_unlock_irq
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
__spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_spin_unlock_irq);
+#endif
+#ifndef _spin_unlock_bh
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
__spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_spin_unlock_bh);
+#endif
+#ifndef _read_unlock_irqrestore
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
__read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
+#endif
+#ifndef _read_unlock_irq
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
__read_unlock_irq(lock);
}
EXPORT_SYMBOL(_read_unlock_irq);
+#endif
+#ifndef _read_unlock_bh
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
__read_unlock_bh(lock);
}
EXPORT_SYMBOL(_read_unlock_bh);
+#endif
+#ifndef _write_unlock_irqrestore
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
__write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
+#endif
+#ifndef _write_unlock_irq
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
__write_unlock_irq(lock);
}
EXPORT_SYMBOL(_write_unlock_irq);
+#endif
+#ifndef _write_unlock_bh
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
__write_unlock_bh(lock);
}
EXPORT_SYMBOL(_write_unlock_bh);
+#endif
+#ifndef _spin_trylock_bh
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
return __spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_spin_trylock_bh);
+#endif
notrace int in_lock_functions(unsigned long addr)
{
Index: linux-2.6/include/linux/spinlock_api_smp.h
===================================================================
--- linux-2.6.orig/include/linux/spinlock_api_smp.h
+++ linux-2.6/include/linux/spinlock_api_smp.h
@@ -60,6 +60,125 @@ void __lockfunc _read_unlock_irqrestore(
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
+#ifndef CONFIG_DEBUG_SPINLOCK
+#ifndef CONFIG_GENERIC_LOCKBREAK
+
+#ifdef __spin_lock_is_small
+#define _spin_lock(lock) __spin_lock(lock)
+#endif
+
+#ifdef __read_lock_is_small
+#define _read_lock(lock) __read_lock(lock)
+#endif
+
+#ifdef __write_lock_is_small
+#define _write_lock(lock) __write_lock(lock)
+#endif
+
+#ifdef __spin_lock_bh_is_small
+#define _spin_lock_bh(lock) __spin_lock_bh(lock)
+#endif
+
+#ifdef __read_lock_bh_is_small
+#define _read_lock_bh(lock) __read_lock_bh(lock)
+#endif
+
+#ifdef __write_lock_bh_is_small
+#define _write_lock_bh(lock) __write_lock_bh(lock)
+#endif
+
+#ifdef __spin_lock_irq_is_small
+#define _spin_lock_irq(lock) __spin_lock_irq(lock)
+#endif
+
+#ifdef __read_lock_irq_is_small
+#define _read_lock_irq(lock) __read_lock_irq(lock)
+#endif
+
+#ifdef __write_lock_irq_is_small
+#define _write_lock_irq(lock) __write_lock_irq(lock)
+#endif
+
+#ifdef __spin_lock_irqsave_is_small
+#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
+#endif
+
+#ifdef __read_lock_irqsave_is_small
+#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
+#endif
+
+#ifdef __write_lock_irqsave_is_small
+#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#endif
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK */
+
+#ifdef __spin_trylock_is_small
+#define _spin_trylock(lock) __spin_trylock(lock)
+#endif
+
+#ifdef __read_trylock_is_small
+#define _read_trylock(lock) __read_trylock(lock)
+#endif
+
+#ifdef __write_trylock_is_small
+#define _write_trylock(lock) __write_trylock(lock)
+#endif
+
+#ifdef __spin_trylock_bh_is_small
+#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#endif
+
+#ifdef __spin_unlock_is_small
+#define _spin_unlock(lock) __spin_unlock(lock)
+#endif
+
+#ifdef __read_unlock_is_small
+#define _read_unlock(lock) __read_unlock(lock)
+#endif
+
+#ifdef __write_unlock_is_small
+#define _write_unlock(lock) __write_unlock(lock)
+#endif
+
+#ifdef __spin_unlock_bh_is_small
+#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
+#endif
+
+#ifdef __read_unlock_bh_is_small
+#define _read_unlock_bh(lock) __read_unlock_bh(lock)
+#endif
+
+#ifdef __write_unlock_bh_is_small
+#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#endif
+
+#ifdef __spin_unlock_irq_is_small
+#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
+#endif
+
+#ifdef __read_unlock_irq_is_small
+#define _read_unlock_irq(lock) __read_unlock_irq(lock)
+#endif
+
+#ifdef __write_unlock_irq_is_small
+#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#endif
+
+#ifdef __spin_unlock_irqrestore_is_small
+#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef __read_unlock_irqrestore_is_small
+#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef __write_unlock_irqrestore_is_small
+#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#endif
+
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
static inline int __spin_trylock(spinlock_t *lock)
{
preempt_disable();
--