public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] x86: slightly shorten __ticket_spin_trylock()
@ 2009-11-27 15:09 Jan Beulich
  2009-12-02 10:45 ` [tip:core/locking] locking, x86: Slightly " tip-bot for Jan Beulich
  0 siblings, 1 reply; 18+ messages in thread
From: Jan Beulich @ 2009-11-27 15:09 UTC (permalink / raw)
  To: mingo, tglx, hpa; +Cc: linux-kernel

Since the callers generally expect a boolean value, there's no need to
zero-extend the outcome of the comparison. It just requires that all of
x86's trylock implementations return bool instead of int.

Signed-off-by: Jan Beulich <jbeulich@novell.com>

---
 arch/x86/include/asm/paravirt.h       |    4 ++--
 arch/x86/include/asm/paravirt_types.h |    2 +-
 arch/x86/include/asm/spinlock.h       |   17 ++++++++---------
 arch/x86/xen/spinlock.c               |    2 +-
 4 files changed, 12 insertions(+), 13 deletions(-)

--- linux-2.6.32-rc8/arch/x86/include/asm/paravirt.h
+++ 2.6.32-rc8-x86-spin-trylock-simplify/arch/x86/include/asm/paravirt.h
@@ -753,9 +753,9 @@ static __always_inline void __raw_spin_l
 	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
 }
 
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline bool __raw_spin_trylock(struct raw_spinlock *lock)
 {
-	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
+	return PVOP_CALL1(bool, pv_lock_ops.spin_trylock, lock);
 }
 
 static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
--- linux-2.6.32-rc8/arch/x86/include/asm/paravirt_types.h
+++ 2.6.32-rc8-x86-spin-trylock-simplify/arch/x86/include/asm/paravirt_types.h
@@ -324,7 +324,7 @@ struct pv_lock_ops {
 	int (*spin_is_contended)(struct raw_spinlock *lock);
 	void (*spin_lock)(struct raw_spinlock *lock);
 	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
-	int (*spin_trylock)(struct raw_spinlock *lock);
+	bool (*spin_trylock)(struct raw_spinlock *lock);
 	void (*spin_unlock)(struct raw_spinlock *lock);
 };
 
--- linux-2.6.32-rc8/arch/x86/include/asm/spinlock.h
+++ 2.6.32-rc8-x86-spin-trylock-simplify/arch/x86/include/asm/spinlock.h
@@ -77,9 +77,10 @@ static __always_inline void __ticket_spi
 		: "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline bool __ticket_spin_trylock(raw_spinlock_t *lock)
 {
-	int tmp, new;
+	int tmp;
+	union { int i; bool b; } new;
 
 	asm volatile("movzwl %2, %0\n\t"
 		     "cmpb %h0,%b0\n\t"
@@ -88,12 +89,11 @@ static __always_inline int __ticket_spin
 		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
 		     "1:"
 		     "sete %b1\n\t"
-		     "movzbl %b1,%0\n\t"
 		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
 		     :
 		     : "memory", "cc");
 
-	return tmp;
+	return new.b;
 }
 
 static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
@@ -127,10 +127,10 @@ static __always_inline void __ticket_spi
 		     : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline bool __ticket_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
-	int new;
+	union { int i; bool b; } new;
 
 	asm volatile("movl %2,%0\n\t"
 		     "movl %0,%1\n\t"
@@ -141,12 +141,11 @@ static __always_inline int __ticket_spin
 		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
 		     "1:"
 		     "sete %b1\n\t"
-		     "movzbl %b1,%0\n\t"
 		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
 		     :
 		     : "memory", "cc");
 
-	return tmp;
+	return new.b;
 }
 
 static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
@@ -190,7 +189,7 @@ static __always_inline void __raw_spin_l
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline bool __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
--- linux-2.6.32-rc8/arch/x86/xen/spinlock.c
+++ 2.6.32-rc8-x86-spin-trylock-simplify/arch/x86/xen/spinlock.c
@@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct 
 	return xl->spinners != 0;
 }
 
-static int xen_spin_trylock(struct raw_spinlock *lock)
+static bool xen_spin_trylock(struct raw_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	u8 old = 1;




^ permalink raw reply	[flat|nested] 18+ messages in thread

end of thread, other threads:[~2009-12-02 17:59 UTC | newest]

Thread overview: 18+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-11-27 15:09 [PATCH] x86: slightly shorten __ticket_spin_trylock() Jan Beulich
2009-12-02 10:45 ` [tip:core/locking] locking, x86: Slightly " tip-bot for Jan Beulich
2009-12-02 13:29   ` Ingo Molnar
2009-12-02 14:06     ` Jan Beulich
2009-12-02 14:12       ` Avi Kivity
2009-12-02 14:25         ` Jan Beulich
2009-12-02 14:36           ` Avi Kivity
2009-12-02 14:59             ` Jan Beulich
2009-12-02 14:21       ` Ingo Molnar
2009-12-02 14:57         ` Jan Beulich
2009-12-02 15:33           ` Linus Torvalds
2009-12-02 15:26   ` Linus Torvalds
2009-12-02 16:24     ` Jan Beulich
2009-12-02 16:56       ` Linus Torvalds
2009-12-02 17:05         ` Linus Torvalds
2009-12-02 17:23           ` H. Peter Anvin
2009-12-02 17:48             ` Linus Torvalds
2009-12-02 17:58               ` H. Peter Anvin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox