linux-arch.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Heiko Carstens <heiko.carstens@de.ibm.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>,
	Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Ingo Molnar <mingo@elte.hu>,
	linux-arch@vger.kernel.org,
	Martin Schwidefsky <schwidefsky@de.ibm.com>,
	Heiko Carstens <heiko.carstens@de.ibm.com>,
	Arnd Bergmann <arnd@arndb.de>,
	Horst Hartmann <horsth@linux.vnet.ibm.com>,
	Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>,
	Nick Piggin <nickpiggin@yahoo.com.au>
Subject: [patch 2/3] spinlock: allow inlined spinlocks
Date: Fri, 14 Aug 2009 14:58:03 +0200	[thread overview]
Message-ID: <20090814125857.181021997@de.ibm.com> (raw)
In-Reply-To: 20090814125801.881618121@de.ibm.com

[-- Attachment #1: 02_spinlock_ifdef.diff --]
[-- Type: text/plain, Size: 11770 bytes --]

From: Heiko Carstens <heiko.carstens@de.ibm.com>

This allows an architecture to specify per lock variant if the
locking code should be kept out-of-line or inlined.

If an architecture wants out-of-line locking code no change is
needed. To force inlining of e.g. spin_lock() the line

#define __spin_lock_is_small

needs to be added to arch/<whatever>/include/asm/spinlock.h

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
---
 include/linux/spinlock_api_smp.h |  146 ++++++++++++++++++++++++++++++++++++++-
 kernel/spinlock.c                |   56 ++++++++++++++
 2 files changed, 199 insertions(+), 3 deletions(-)

Index: linux-2.6/include/linux/spinlock_api_smp.h
===================================================================
--- linux-2.6.orig/include/linux/spinlock_api_smp.h
+++ linux-2.6/include/linux/spinlock_api_smp.h
@@ -19,46 +19,186 @@ int in_lock_functions(unsigned long addr
 
 #define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
 
-void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
 void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 							__acquires(lock);
 void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
 							__acquires(lock);
+unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+							__acquires(lock);
+
+#ifdef __spin_lock_is_small
+#define _spin_lock(lock) __spin_lock(lock)
+#else
+void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
+#endif
+
+#ifdef __read_lock_is_small
+#define _read_lock(lock) __read_lock(lock)
+#else
 void __lockfunc _read_lock(rwlock_t *lock)		__acquires(lock);
+#endif
+
+#ifdef __write_lock_is_small
+#define _write_lock(lock) __write_lock(lock)
+#else
 void __lockfunc _write_lock(rwlock_t *lock)		__acquires(lock);
+#endif
+
+#ifdef __spin_lock_bh_is_small
+#define _spin_lock_bh(lock) __spin_lock_bh(lock)
+#else
 void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
+#endif
+
+#ifdef __read_lock_bh_is_small
+#define _read_lock_bh(lock) __read_lock_bh(lock)
+#else
 void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(lock);
+#endif
+
+#ifdef __write_lock_bh_is_small
+#define _write_lock_bh(lock) __write_lock_bh(lock)
+#else
 void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(lock);
+#endif
+
+#ifdef __spin_lock_irq_is_small
+#define _spin_lock_irq(lock) __spin_lock_irq(lock)
+#else
 void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(lock);
+#endif
+
+#ifdef __read_lock_irq_is_small
+#define _read_lock_irq(lock) __read_lock_irq(lock)
+#else
 void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(lock);
+#endif
+
+#ifdef __write_lock_irq_is_small
+#define _write_lock_irq(lock) __write_lock_irq(lock)
+#else
 void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(lock);
+#endif
+
+#ifdef __spin_lock_irqsave_is_small
+#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
+#else
 unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 							__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
-							__acquires(lock);
+#endif
+
+#ifdef __read_lock_irqsave_is_small
+#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
+#else
 unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
 							__acquires(lock);
+#endif
+
+#ifdef __write_lock_irqsave_is_small
+#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#else
 unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
 							__acquires(lock);
+#endif
+
+#ifdef __spin_trylock_is_small
+#define _spin_trylock(lock) __spin_trylock(lock)
+#else
 int __lockfunc _spin_trylock(spinlock_t *lock);
+#endif
+
+#ifdef __read_trylock_is_small
+#define _read_trylock(lock) __read_trylock(lock)
+#else
 int __lockfunc _read_trylock(rwlock_t *lock);
+#endif
+
+#ifdef __write_trylock_is_small
+#define _write_trylock(lock) __write_trylock(lock)
+#else
 int __lockfunc _write_trylock(rwlock_t *lock);
+#endif
+
+#ifdef __spin_trylock_bh_is_small
+#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#else
 int __lockfunc _spin_trylock_bh(spinlock_t *lock);
+#endif
+
+#ifdef __spin_unlock_is_small
+#define _spin_unlock(lock) __spin_unlock(lock)
+#else
 void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(lock);
+#endif
+
+#ifdef __read_unlock_is_small
+#define _read_unlock(lock) __read_unlock(lock)
+#else
 void __lockfunc _read_unlock(rwlock_t *lock)		__releases(lock);
+#endif
+
+#ifdef __write_unlock_is_small
+#define _write_unlock(lock) __write_unlock(lock)
+#else
 void __lockfunc _write_unlock(rwlock_t *lock)		__releases(lock);
+#endif
+
+#ifdef __spin_unlock_bh_is_small
+#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
+#else
 void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(lock);
+#endif
+
+#ifdef __read_unlock_bh_is_small
+#define _read_unlock_bh(lock) __read_unlock_bh(lock)
+#else
 void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(lock);
+#endif
+
+#ifdef __write_unlock_bh_is_small
+#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#else
 void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(lock);
+#endif
+
+#ifdef __spin_unlock_irq_is_small
+#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
+#else
 void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(lock);
+#endif
+
+#ifdef __read_unlock_irq_is_small
+#define _read_unlock_irq(lock) __read_unlock_irq(lock)
+#else
 void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(lock);
+#endif
+
+#ifdef __write_unlock_irq_is_small
+#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#else
 void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(lock);
+#endif
+
+#ifdef __spin_unlock_irqrestore_is_small
+#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
+#else
 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 							__releases(lock);
+#endif
+
+#ifdef __read_unlock_irqrestore_is_small
+#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
+#else
 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 							__releases(lock);
+#endif
+
+#ifdef __write_unlock_irqrestore_is_small
+#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#else
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 							__releases(lock);
+#endif
 
 static inline int __spin_trylock(spinlock_t *lock)
 {
Index: linux-2.6/kernel/spinlock.c
===================================================================
--- linux-2.6.orig/kernel/spinlock.c
+++ linux-2.6/kernel/spinlock.c
@@ -21,23 +21,29 @@
 #include <linux/debug_locks.h>
 #include <linux/module.h>
 
+#ifndef __spin_trylock_is_small
 int __lockfunc _spin_trylock(spinlock_t *lock)
 {
 	return __spin_trylock(lock);
 }
 EXPORT_SYMBOL(_spin_trylock);
+#endif
 
+#ifndef __read_trylock_is_small
 int __lockfunc _read_trylock(rwlock_t *lock)
 {
 	return __read_trylock(lock);
 }
 EXPORT_SYMBOL(_read_trylock);
+#endif
 
+#ifndef __write_trylock_is_small
 int __lockfunc _write_trylock(rwlock_t *lock)
 {
 	return __write_trylock(lock);
 }
 EXPORT_SYMBOL(_write_trylock);
+#endif
 
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
@@ -46,77 +52,101 @@ EXPORT_SYMBOL(_write_trylock);
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
+#ifndef __read_lock_is_small
 void __lockfunc _read_lock(rwlock_t *lock)
 {
 	__read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock);
+#endif
 
+#ifndef __spin_lock_irqsave_is_small
 unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 {
 	return __spin_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_spin_lock_irqsave);
+#endif
 
+#ifndef __spin_lock_irq_is_small
 void __lockfunc _spin_lock_irq(spinlock_t *lock)
 {
 	__spin_lock_irq(lock);
 }
 EXPORT_SYMBOL(_spin_lock_irq);
+#endif
 
+#ifndef __spin_lock_bh_is_small
 void __lockfunc _spin_lock_bh(spinlock_t *lock)
 {
 	__spin_lock_bh(lock);
 }
 EXPORT_SYMBOL(_spin_lock_bh);
+#endif
 
+#ifndef __read_lock_irqsave_is_small
 unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
 {
 	return __read_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_read_lock_irqsave);
+#endif
 
+#ifndef __read_lock_irq_is_small
 void __lockfunc _read_lock_irq(rwlock_t *lock)
 {
 	__read_lock_irq(lock);
 }
 EXPORT_SYMBOL(_read_lock_irq);
+#endif
 
+#ifndef __read_lock_bh_is_small
 void __lockfunc _read_lock_bh(rwlock_t *lock)
 {
 	__read_lock_bh(lock);
 }
 EXPORT_SYMBOL(_read_lock_bh);
+#endif
 
+#ifndef __write_lock_irqsave_is_small
 unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
 {
 	return __write_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_write_lock_irqsave);
+#endif
 
+#ifndef __write_lock_irq_is_small
 void __lockfunc _write_lock_irq(rwlock_t *lock)
 {
 	__write_lock_irq(lock);
 }
 EXPORT_SYMBOL(_write_lock_irq);
+#endif
 
+#ifndef __write_lock_bh_is_small
 void __lockfunc _write_lock_bh(rwlock_t *lock)
 {
 	__write_lock_bh(lock);
 }
 EXPORT_SYMBOL(_write_lock_bh);
+#endif
 
+#ifndef __spin_lock_is_small
 void __lockfunc _spin_lock(spinlock_t *lock)
 {
 	__spin_lock(lock);
 }
 EXPORT_SYMBOL(_spin_lock);
+#endif
 
+#ifndef __write_lock_is_small
 void __lockfunc _write_lock(rwlock_t *lock)
 {
 	__write_lock(lock);
 }
 EXPORT_SYMBOL(_write_lock);
+#endif
 
 #else /* CONFIG_PREEMPT: */
 
@@ -242,83 +272,109 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
 
 #endif
 
+#ifndef __spin_unlock_is_small
 void __lockfunc _spin_unlock(spinlock_t *lock)
 {
 	__spin_unlock(lock);
 }
 EXPORT_SYMBOL(_spin_unlock);
+#endif
 
+#ifndef __write_unlock_is_small
 void __lockfunc _write_unlock(rwlock_t *lock)
 {
 	__write_unlock(lock);
 }
 EXPORT_SYMBOL(_write_unlock);
+#endif
 
+#ifndef __read_unlock_is_small
 void __lockfunc _read_unlock(rwlock_t *lock)
 {
 	__read_unlock(lock);
 }
 EXPORT_SYMBOL(_read_unlock);
+#endif
 
+#ifndef __spin_unlock_irqrestore_is_small
 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
 	__spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_spin_unlock_irqrestore);
+#endif
 
+#ifndef __spin_unlock_irq_is_small
 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
 {
 	__spin_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_spin_unlock_irq);
+#endif
 
+#ifndef __spin_unlock_bh_is_small
 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
 	__spin_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_spin_unlock_bh);
+#endif
 
+#ifndef __read_unlock_irqrestore_is_small
 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
 	__read_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_read_unlock_irqrestore);
+#endif
 
+#ifndef __read_unlock_irq_is_small
 void __lockfunc _read_unlock_irq(rwlock_t *lock)
 {
 	__read_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_read_unlock_irq);
+#endif
 
+#ifndef __read_unlock_bh_is_small
 void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
 	__read_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_read_unlock_bh);
+#endif
 
+#ifndef __write_unlock_irqrestore_is_small
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
 	__write_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_write_unlock_irqrestore);
+#endif
 
+#ifndef __write_unlock_irq_is_small
 void __lockfunc _write_unlock_irq(rwlock_t *lock)
 {
 	__write_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_write_unlock_irq);
+#endif
 
+#ifndef __write_unlock_bh_is_small
 void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
 	__write_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_write_unlock_bh);
+#endif
 
+#ifndef __spin_trylock_bh_is_small
 int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 {
 	return __spin_trylock_bh(lock);
 }
 EXPORT_SYMBOL(_spin_trylock_bh);
+#endif
 
 notrace int in_lock_functions(unsigned long addr)
 {

-- 

  parent reply	other threads:[~2009-08-14 12:59 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2009-08-14 12:58 [patch 0/3] Allow inlined spinlocks again V4 Heiko Carstens
2009-08-14 12:58 ` [patch 1/3] spinlock: move spinlock function bodies to header file Heiko Carstens
2009-08-14 12:58 ` Heiko Carstens [this message]
2009-08-16 17:57   ` [patch 2/3] spinlock: allow inlined spinlocks Heiko Carstens
2009-08-16 18:06     ` Ingo Molnar
2009-08-16 18:43       ` Linus Torvalds
2009-08-16 20:24         ` Ingo Molnar
2009-08-16 21:07           ` Linus Torvalds
2009-08-16 21:18             ` Ingo Molnar
2009-08-16 18:44       ` Heiko Carstens
2009-08-16 20:48         ` Ingo Molnar
2009-08-16 21:33           ` Heiko Carstens
2009-08-16 21:36             ` Ingo Molnar
2009-08-16 18:22     ` Linus Torvalds
2009-08-17 15:46       ` Heiko Carstens
2009-08-14 12:58 ` [patch 3/3] spinlock: inline code for all locking variants on s390 Heiko Carstens
  -- strict thread matches above, loose matches on Subject: below --
2009-08-12 18:39 [patch 0/3] Allow inlined spinlocks again V3 Heiko Carstens
2009-08-12 18:39 ` [patch 2/3] spinlock: allow inlined spinlocks Heiko Carstens

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20090814125857.181021997@de.ibm.com \
    --to=heiko.carstens@de.ibm.com \
    --cc=a.p.zijlstra@chello.nl \
    --cc=akpm@linux-foundation.org \
    --cc=arnd@arndb.de \
    --cc=ehrhardt@linux.vnet.ibm.com \
    --cc=horsth@linux.vnet.ibm.com \
    --cc=linux-arch@vger.kernel.org \
    --cc=mingo@elte.hu \
    --cc=nickpiggin@yahoo.com.au \
    --cc=schwidefsky@de.ibm.com \
    --cc=torvalds@linux-foundation.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).