public inbox for linux-arch@vger.kernel.org
 help / color / mirror / Atom feed
* [patch 1/2] Directed yield: cpu_relax variants for spinlocks and rw-locks.
@ 2006-09-15 11:30 Martin Schwidefsky
  2006-09-15 11:58 ` Paul Mackerras
  0 siblings, 1 reply; 3+ messages in thread
From: Martin Schwidefsky @ 2006-09-15 11:30 UTC (permalink / raw)
  To: linux-arch, mingo

From: Martin Schwidefsky <schwidefsky@de.ibm.com>

[patch 1/2] Directed yield: cpu_relax variants for spinlocks and rw-locks.

In order to implement a spinlock that yields the cpu in favour of
the current lock holder cpu_relax variants for spinlocks and
read/write locks are needed. The new _raw_spin_relax, _raw_read_relax
and _raw_write_relax primitives have an additional argument: the
pointer to the lock structure.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
---

 include/asm-alpha/spinlock.h         |    4 ++++
 include/asm-arm/spinlock.h           |    4 ++++
 include/asm-cris/arch-v32/spinlock.h |    4 ++++
 include/asm-i386/spinlock.h          |    4 ++++
 include/asm-ia64/spinlock.h          |    4 ++++
 include/asm-m32r/spinlock.h          |    4 ++++
 include/asm-mips/spinlock.h          |    4 ++++
 include/asm-parisc/spinlock.h        |    4 ++++
 include/asm-powerpc/spinlock.h       |    4 ++++
 include/asm-ppc/spinlock.h           |    4 ++++
 include/asm-s390/spinlock.h          |    4 ++++
 include/asm-sh/spinlock.h            |    4 ++++
 include/asm-sparc/spinlock.h         |    4 ++++
 include/asm-sparc64/spinlock.h       |    4 ++++
 include/asm-x86_64/spinlock.h        |    4 ++++
 kernel/spinlock.c                    |    4 ++--
 16 files changed, 62 insertions(+), 2 deletions(-)

diff -urpN linux-2.6/include/asm-alpha/spinlock.h linux-2.6-patched/include/asm-alpha/spinlock.h
--- linux-2.6/include/asm-alpha/spinlock.h	2006-09-15 12:17:46.000000000 +0200
+++ linux-2.6-patched/include/asm-alpha/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -166,4 +166,8 @@ static inline void __raw_write_unlock(ra
 	lock->lock = 0;
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* _ALPHA_SPINLOCK_H */
diff -urpN linux-2.6/include/asm-arm/spinlock.h linux-2.6-patched/include/asm-arm/spinlock.h
--- linux-2.6/include/asm-arm/spinlock.h	2006-09-15 12:17:46.000000000 +0200
+++ linux-2.6-patched/include/asm-arm/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -218,4 +218,8 @@ static inline int __raw_read_trylock(raw
 /* read_can_lock - would read_trylock() succeed? */
 #define __raw_read_can_lock(x)		((x)->lock < 0x80000000)
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* __ASM_SPINLOCK_H */
diff -urpN linux-2.6/include/asm-cris/arch-v32/spinlock.h linux-2.6-patched/include/asm-cris/arch-v32/spinlock.h
--- linux-2.6/include/asm-cris/arch-v32/spinlock.h	2006-06-18 03:49:35.000000000 +0200
+++ linux-2.6-patched/include/asm-cris/arch-v32/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -160,4 +160,8 @@ static __inline__ int is_write_locked(rw
 	return rw->counter < 0;
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* __ASM_ARCH_SPINLOCK_H */
diff -urpN linux-2.6/include/asm-i386/spinlock.h linux-2.6-patched/include/asm-i386/spinlock.h
--- linux-2.6/include/asm-i386/spinlock.h	2006-09-15 12:17:47.000000000 +0200
+++ linux-2.6-patched/include/asm-i386/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -199,4 +199,8 @@ static inline void __raw_write_unlock(ra
 				 : "+m" (rw->lock) : : "memory");
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* __ASM_SPINLOCK_H */
diff -urpN linux-2.6/include/asm-ia64/spinlock.h linux-2.6-patched/include/asm-ia64/spinlock.h
--- linux-2.6/include/asm-ia64/spinlock.h	2006-06-18 03:49:35.000000000 +0200
+++ linux-2.6-patched/include/asm-ia64/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -213,4 +213,8 @@ static inline int __raw_read_trylock(raw
 	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /*  _ASM_IA64_SPINLOCK_H */
diff -urpN linux-2.6/include/asm-m32r/spinlock.h linux-2.6-patched/include/asm-m32r/spinlock.h
--- linux-2.6/include/asm-m32r/spinlock.h	2006-09-15 12:17:47.000000000 +0200
+++ linux-2.6-patched/include/asm-m32r/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -309,4 +309,8 @@ static inline int __raw_write_trylock(ra
 	return 0;
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif	/* _ASM_M32R_SPINLOCK_H */
diff -urpN linux-2.6/include/asm-mips/spinlock.h linux-2.6-patched/include/asm-mips/spinlock.h
--- linux-2.6/include/asm-mips/spinlock.h	2006-06-18 03:49:35.000000000 +0200
+++ linux-2.6-patched/include/asm-mips/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -283,4 +283,8 @@ static inline int __raw_write_trylock(ra
 	return ret;
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* _ASM_SPINLOCK_H */
diff -urpN linux-2.6/include/asm-parisc/spinlock.h linux-2.6-patched/include/asm-parisc/spinlock.h
--- linux-2.6/include/asm-parisc/spinlock.h	2006-06-18 03:49:35.000000000 +0200
+++ linux-2.6-patched/include/asm-parisc/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -152,4 +152,8 @@ static __inline__ int __raw_write_can_lo
 	return !rw->counter;
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* __ASM_SPINLOCK_H */
diff -urpN linux-2.6/include/asm-powerpc/spinlock.h linux-2.6-patched/include/asm-powerpc/spinlock.h
--- linux-2.6/include/asm-powerpc/spinlock.h	2006-09-15 12:17:47.000000000 +0200
+++ linux-2.6-patched/include/asm-powerpc/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -285,5 +285,9 @@ static __inline__ void __raw_write_unloc
 	rw->lock = 0;
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SPINLOCK_H */
diff -urpN linux-2.6/include/asm-ppc/spinlock.h linux-2.6-patched/include/asm-ppc/spinlock.h
--- linux-2.6/include/asm-ppc/spinlock.h	2006-06-18 03:49:35.000000000 +0200
+++ linux-2.6-patched/include/asm-ppc/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -161,4 +161,8 @@ static __inline__ void __raw_write_unloc
 	rw->lock = 0;
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* __ASM_SPINLOCK_H */
diff -urpN linux-2.6/include/asm-s390/spinlock.h linux-2.6-patched/include/asm-s390/spinlock.h
--- linux-2.6/include/asm-s390/spinlock.h	2006-06-18 03:49:35.000000000 +0200
+++ linux-2.6-patched/include/asm-s390/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -135,4 +135,8 @@ static inline int __raw_write_trylock(ra
 	return _raw_write_trylock_retry(rw);
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* __ASM_SPINLOCK_H */
diff -urpN linux-2.6/include/asm-sh/spinlock.h linux-2.6-patched/include/asm-sh/spinlock.h
--- linux-2.6/include/asm-sh/spinlock.h	2006-06-18 03:49:35.000000000 +0200
+++ linux-2.6-patched/include/asm-sh/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -100,4 +100,8 @@ static inline int __raw_write_trylock(ra
 	return 0;
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* __ASM_SH_SPINLOCK_H */
diff -urpN linux-2.6/include/asm-sparc/spinlock.h linux-2.6-patched/include/asm-sparc/spinlock.h
--- linux-2.6/include/asm-sparc/spinlock.h	2006-09-15 12:17:48.000000000 +0200
+++ linux-2.6-patched/include/asm-sparc/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -154,6 +154,10 @@ static inline int __raw_write_trylock(ra
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 #define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
 #define __raw_write_can_lock(rw) (!(rw)->lock)
 
diff -urpN linux-2.6/include/asm-sparc64/spinlock.h linux-2.6-patched/include/asm-sparc64/spinlock.h
--- linux-2.6/include/asm-sparc64/spinlock.h	2006-09-15 12:17:48.000000000 +0200
+++ linux-2.6-patched/include/asm-sparc64/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -241,6 +241,10 @@ static int inline __write_trylock(raw_rw
 #define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
 #define __raw_write_can_lock(rw)	(!(rw)->lock)
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_SPINLOCK_H) */
diff -urpN linux-2.6/include/asm-x86_64/spinlock.h linux-2.6-patched/include/asm-x86_64/spinlock.h
--- linux-2.6/include/asm-x86_64/spinlock.h	2006-09-15 12:17:48.000000000 +0200
+++ linux-2.6-patched/include/asm-x86_64/spinlock.h	2006-09-15 12:18:45.000000000 +0200
@@ -131,4 +131,8 @@ static inline void __raw_write_unlock(ra
 				: "=m" (rw->lock) : : "memory");
 }
 
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif /* __ASM_SPINLOCK_H */
diff -urpN linux-2.6/kernel/spinlock.c linux-2.6-patched/kernel/spinlock.c
--- linux-2.6/kernel/spinlock.c	2006-09-15 12:17:52.000000000 +0200
+++ linux-2.6-patched/kernel/spinlock.c	2006-09-15 12:18:45.000000000 +0200
@@ -221,7 +221,7 @@ void __lockfunc _##op##_lock(locktype##_
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
 		while (!op##_can_lock(lock) && (lock)->break_lock)	\
-			cpu_relax();					\
+			_raw_##op##_relax(&lock->raw_lock);		\
 	}								\
 	(lock)->break_lock = 0;						\
 }									\
@@ -243,7 +243,7 @@ unsigned long __lockfunc _##op##_lock_ir
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
 		while (!op##_can_lock(lock) && (lock)->break_lock)	\
-			cpu_relax();					\
+			_raw_##op##_relax(&lock->raw_lock);		\
 	}								\
 	(lock)->break_lock = 0;						\
 	return flags;							\

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [patch 1/2] Directed yield: cpu_relax variants for spinlocks and rw-locks.
  2006-09-15 11:30 [patch 1/2] Directed yield: cpu_relax variants for spinlocks and rw-locks Martin Schwidefsky
@ 2006-09-15 11:58 ` Paul Mackerras
  2006-09-15 12:06   ` Martin Schwidefsky
  0 siblings, 1 reply; 3+ messages in thread
From: Paul Mackerras @ 2006-09-15 11:58 UTC (permalink / raw)
  To: Martin Schwidefsky; +Cc: linux-arch, mingo

Martin Schwidefsky writes:

> In order to implement a spinlock that yields the cpu in favour of
> the current lock holder cpu_relax variants for spinlocks and
> read/write locks are needed. The new _raw_spin_relax, _raw_read_relax
> and _raw_write_relax primitives have an additional argument: the
> pointer to the lock structure.

This will be useful on powerpc as well.  We need:

#define _raw_spin_relax(lock)	__spin_yield(lock)
#define _raw_read_relax(lock)	__rw_yield(lock)
#define _raw_write_relax(lock)	__rw_yield(lock)

in order to define your new primitives in terms of what we already
have defined in asm-powerpc/spinlock.h.

Thanks,
Paul.

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [patch 1/2] Directed yield: cpu_relax variants for spinlocks and rw-locks.
  2006-09-15 11:58 ` Paul Mackerras
@ 2006-09-15 12:06   ` Martin Schwidefsky
  0 siblings, 0 replies; 3+ messages in thread
From: Martin Schwidefsky @ 2006-09-15 12:06 UTC (permalink / raw)
  To: Paul Mackerras; +Cc: linux-arch, mingo

On Fri, 2006-09-15 at 21:58 +1000, Paul Mackerras wrote:
> > In order to implement a spinlock that yields the cpu in favour of
> > the current lock holder cpu_relax variants for spinlocks and
> > read/write locks are needed. The new _raw_spin_relax, _raw_read_relax
> > and _raw_write_relax primitives have an additional argument: the
> > pointer to the lock structure.
> 
> This will be useful on powerpc as well.  We need:
> 
> #define _raw_spin_relax(lock)	__spin_yield(lock)
> #define _raw_read_relax(lock)	__rw_yield(lock)
> #define _raw_write_relax(lock)	__rw_yield(lock)
> 
> in order to define your new primitives in terms of what we already
> have defined in asm-powerpc/spinlock.h.

Ahh, I haven't seen that powerpc already has a __spin_yield. I'll change
the patch accordingly.

-- 
blue skies,
  Martin.

Martin Schwidefsky
Linux for zSeries Development & Services
IBM Deutschland Entwicklung GmbH

"Reality continues to ruin my life." - Calvin.



^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2006-09-15 12:06 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2006-09-15 11:30 [patch 1/2] Directed yield: cpu_relax variants for spinlocks and rw-locks Martin Schwidefsky
2006-09-15 11:58 ` Paul Mackerras
2006-09-15 12:06   ` Martin Schwidefsky

This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox.