From mboxrd@z Thu Jan  1 00:00:00 1970
From: Jeremy Fitzhardinge
Subject: [PATCH 02/13] x86/ticketlock: collapse a layer of functions
Date: Thu, 1 Sep 2011 17:54:55 -0700
Message-ID: <860046dfb09b88e19854366ca1fc29b027b2fd27.1314922370.git.jeremy.fitzhardinge@citrix.com>
Sender: xen-devel-bounces@lists.xensource.com
Errors-To: xen-devel-bounces@lists.xensource.com
To: "H. Peter Anvin"
Cc: Marcelo Tosatti, Nick Piggin, KVM, Peter Zijlstra,
	the arch/x86 maintainers, Linux Kernel Mailing List, Andi Kleen,
	Avi Kivity, Jeremy Fitzhardinge, Ingo Molnar, Linus Torvalds,
	Xen Devel
List-Id: xen-devel@lists.xenproject.org

From: Jeremy Fitzhardinge

Now that the paravirtualization layer doesn't exist at the spinlock
level any more, we can collapse the __ticket_ functions into the arch_
functions.

Signed-off-by: Jeremy Fitzhardinge
---
 arch/x86/include/asm/spinlock.h |   35 +++++------------------------------
 1 files changed, 5 insertions(+), 30 deletions(-)

diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index f5d9236..c1d9617 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -81,7 +81,7 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock, __t
  * save some instructions and make the code more elegant. There really isn't
  * much between them in performance though, especially as locks are out of line.
  */
-static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
 	register struct __raw_tickets inc = { .tail = 1 };
 
@@ -101,7 +101,7 @@ static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
 out:	barrier();	/* make sure nothing creeps before the lock is taken */
 }
 
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	arch_spinlock_t old, new;
 
@@ -133,7 +133,7 @@ static __always_inline void __ticket_unlock_release(arch_spinlock_t *lock)
 }
 #endif
 
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_t next = lock->tickets.head + 1;
 
@@ -141,46 +141,21 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 	__ticket_unlock_kick(lock, next);
 }
 
-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
 	return !!(tmp.tail ^ tmp.head);
 }
 
-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
 	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
 }
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
-	return __ticket_spin_is_locked(lock);
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-	return __ticket_spin_is_contended(lock);
-}
 #define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	__ticket_spin_lock(lock);
-}
-
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	return __ticket_spin_trylock(lock);
-}
-
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	__ticket_spin_unlock(lock);
-}
-
 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						 unsigned long flags)
 {
-- 
1.7.6
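
For readers who want to see the collapsed result in isolation, below is a
minimal, self-contained userspace sketch of the ticket-lock fast path that
the arch_ functions above implement. It is an illustration only, not the
kernel code: the toy_ names are hypothetical, it uses GCC/Clang __atomic
builtins in place of the kernel's asm primitives, and the paravirt kick
hooks, TICKET_MASK handling, trylock, and cpu_relax() are all omitted.

	/*
	 * Toy userspace ticket lock (illustration only; not kernel code).
	 * Build with: cc -O2 toy_ticketlock.c
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct toy_spinlock {
		uint16_t head;	/* ticket currently being served */
		uint16_t tail;	/* next ticket to hand out */
	};

	static void toy_spin_lock(struct toy_spinlock *lock)
	{
		/* take a ticket: atomically bump tail, keeping the old value */
		uint16_t me = __atomic_fetch_add(&lock->tail, 1,
						 __ATOMIC_RELAXED);

		/* spin until our ticket comes up; acquire pairs with unlock */
		while (__atomic_load_n(&lock->head, __ATOMIC_ACQUIRE) != me)
			;	/* the kernel would cpu_relax() here */
	}

	static void toy_spin_unlock(struct toy_spinlock *lock)
	{
		/* serve the next ticket, releasing the critical section */
		__atomic_fetch_add(&lock->head, 1, __ATOMIC_RELEASE);
	}

	int main(void)
	{
		struct toy_spinlock lock = { 0, 0 };

		toy_spin_lock(&lock);
		printf("locked: head=%u tail=%u\n", lock.head, lock.tail);
		toy_spin_unlock(&lock);
		printf("unlocked: head=%u tail=%u\n", lock.head, lock.tail);
		return 0;
	}

The head/tail pair is what makes the lock FIFO-fair: each locker takes the
next tail ticket and waits for head to reach it, so waiters are served
strictly in arrival order, which is also why arch_spin_is_locked() and
arch_spin_is_contended() above reduce to simple head/tail comparisons.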