Index: linux24/include/asm-ia64/processor.h
diff -u linux24/include/asm-ia64/processor.h:1.1.1.1 linux24/include/asm-ia64/processor.h:1.1.1.1.20.1
--- linux24/include/asm-ia64/processor.h:1.1.1.1	Tue Dec 18 15:49:04 2001
+++ linux24/include/asm-ia64/processor.h	Fri Feb 15 09:56:30 2002
@@ -547,6 +547,8 @@
 extern void ia64_load_pm_regs (struct task_struct *task);
 #endif
 
+#define rep_nop() /*not an ia64 op*/
+
 #define ia64_fph_enable()	asm volatile (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
 #define ia64_fph_disable()	asm volatile (";; ssm psr.dfh;; srlz.d;;" ::: "memory");
 
Index: linux24/include/linux/mcslock.h
diff -u /dev/null linux24/include/linux/mcslock.h:1.1.4.11
--- /dev/null	Fri Feb 15 12:15:44 2002
+++ linux24/include/linux/mcslock.h	Fri Feb 15 09:54:13 2002
@@ -0,0 +1,158 @@
+/*
+  MCS and Nodeless MCS Lock implementations.
+  John Stultz (johnstul@us.ibm.com)
+
+  New Nodeless MCS Lock originally designed for K42 by
+  Marc Auslander, David Edelsohn, Orran Y Krieger,
+  Bryan S Rosenburg, and Robert W Wisniewski.
+  http://www.research.ibm.com/K42/
+
+*/
+#ifndef _MCS_LOCKS_H
+#define _MCS_LOCKS_H
+
+#include <linux/config.h> /* NOTE(review): include targets were lost in extraction; reconstructed — verify */
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+
+/*==[MCS Lock]=========================================================*/
+struct mcsnode_t {
+	struct mcsnode_t* volatile next;
+	volatile int flag;
+};
+typedef struct mcsnode_t* volatile mcslock_t;
+typedef struct mcsnode_t mcsnode_t;
+
+#define MCS_LOCK_UNLOCKED (mcslock_t) NULL
+#define MCS_NODE_UNLOCKED (mcsnode_t) {NULL, 0}
+
+#define mcs_lock_init(x)	do { *(x) = MCS_LOCK_UNLOCKED; } while(0)
+#define mcs_is_locked(x)	((*(mcsnode_t*)(x))!=NULL)
+#define mcs_unlock_wait(x)	do {barrier();} while(mcs_is_locked(x))
+
+#define mcs_lock_irq(x,y) \
+	do { local_irq_disable(); mcs_lock(x,y); } while(0)
+#define mcs_unlock_irq(x,y) \
+	do { mcs_unlock(x,y); local_irq_enable(); } while(0)
+#define mcs_lock_irqsave(x,y,flags) \
+	do { local_irq_save(flags); mcs_lock(x,y); } while(0)
+#define mcs_unlock_irqrestore(x,y,flags) \
+	do { mcs_unlock(x,y); local_irq_restore(flags); } while(0)
+
+#ifndef CONFIG_SMP
+/*UP nops*/
+#define mcs_lock(x,y)	(void*)(y)
+#define mcs_trylock(x,y)	(1)
+#define mcs_unlock(x,y)	(void*)(x)
+
+#else /*!CONFIG_SMP*/
+
+extern void mcs_lock(mcslock_t* lock, mcsnode_t* instance);
+static inline void __mcs_lock(mcslock_t* lock, mcsnode_t* instance)
+{
+	mcsnode_t* before;
+	instance->next = NULL;
+	before = xchg((mcsnode_t**)lock,instance);
+	if (before != NULL) {
+		instance->flag = 1;
+		before->next = instance;
+		while(instance->flag){rep_nop();}
+	}
+}
+
+extern int mcs_trylock(mcslock_t* lock, mcsnode_t* instance);
+static inline int __mcs_trylock(mcslock_t* lock, mcsnode_t* instance)
+{
+	if(NULL == cmpxchg(lock,NULL,instance))
+		return 1;
+	return 0;
+}
+
+extern void mcs_unlock(mcslock_t* lock, mcsnode_t* instance);
+static inline void __mcs_unlock(mcslock_t* lock, mcsnode_t* instance)
+{
+	if(instance->next == NULL) {
+		if(instance == cmpxchg(lock,instance,NULL))
+			return;
+		while(instance->next == NULL){rep_nop();}
+	}
+	wmb();
+	instance->next->flag = 0;
+}
+#endif /*!CONFIG_SMP*/
+
+
+/*==[Nodeless MCS Lock]===============================================*/
+
+struct nmcslock_t{
+	mcsnode_t holder;
+	mcslock_t list;
+};
+
+typedef struct nmcslock_t nmcslock_t;
+
+#define NMCS_LOCK_UNLOCKED (nmcslock_t) {MCS_NODE_UNLOCKED,MCS_LOCK_UNLOCKED}
+
+#define nmcs_lock_init(x)	do { *(x) = NMCS_LOCK_UNLOCKED; } while(0)
+#define nmcs_is_locked(x)	((x)->list != 0)
+#define nmcs_unlock_wait(x)	do {barrier();} while(nmcs_is_locked(x))
+
+
+#ifndef CONFIG_SMP
+/*UP nops*/
+#define nmcs_lock(x)	(void*)(x)
+#define nmcs_trylock(x)	(1)
+#define nmcs_unlock(x)	(void*)(x)
+
+#else /*!CONFIG_SMP*/
+
+extern void nmcs_lock(nmcslock_t* lock);
+static inline void __nmcs_lock(nmcslock_t* lock)
+{
+	while(1){
+		if(NULL == cmpxchg(&lock->list, NULL, &lock->holder)){
+			return;
+		}else{
+			mcsnode_t instance = MCS_NODE_UNLOCKED;
+			mcsnode_t* before = (mcsnode_t*)lock->list;
+			if(before && (before == cmpxchg(&lock->list, before, &instance)))
+			{
+				instance.flag = 1;
+				before->next = &instance;
+				while(instance.flag){rep_nop();}
+				lock->holder.next = NULL;
+				if(&instance != cmpxchg(&lock->list, &instance, &lock->holder)){
+					while(!instance.next){rep_nop();}
+					lock->holder.next = instance.next;
+				}
+				return;
+			}
+		}
+		/*whoops! the list changed on us. try again*/
+	}
+}
+
+extern int nmcs_trylock(nmcslock_t* lock);
+static inline int __nmcs_trylock(nmcslock_t* lock)
+{
+	if(NULL == cmpxchg(&lock->list, NULL, &lock->holder))
+		return 1;
+	return 0;
+}
+
+extern void nmcs_unlock(nmcslock_t* lock);
+static inline void __nmcs_unlock(nmcslock_t* lock)
+{
+	wmb();
+	if(&lock->holder != cmpxchg(&lock->list, &lock->holder, NULL))
+	{
+		while(lock->holder.next == NULL){rep_nop();}
+		lock->holder.next->flag = 0;
+	}
+}
+
+#endif /*!CONFIG_SMP*/
+
+#endif /*!_MCS_LOCKS_H*/
Index: linux24/lib/Makefile
diff -u linux24/lib/Makefile:1.1.1.1 linux24/lib/Makefile:1.1.1.1.18.1
--- linux24/lib/Makefile:1.1.1.1	Tue Dec 18 15:49:00 2001
+++ linux24/lib/Makefile	Tue Feb 12 14:15:38 2002
@@ -10,7 +10,7 @@
 
 export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o
 
-obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o bust_spinlocks.o rbtree.o
+obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o bust_spinlocks.o rbtree.o mcslock.o
 
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
Index: linux24/lib/mcslock.c
diff -u /dev/null linux24/lib/mcslock.c:1.1.4.1
--- /dev/null	Fri Feb 15 12:15:44 2002
+++ linux24/lib/mcslock.c	Tue Feb 12 14:15:38 2002
@@ -0,0 +1,31 @@
+#include <linux/mcslock.h>
+
+/*non inlined versions of mcs lock*/
+void mcs_lock(mcslock_t* lock, mcsnode_t* instance)
+{
+	__mcs_lock(lock, instance);
+}
+int mcs_trylock(mcslock_t* lock, mcsnode_t* instance)
+{
+	return __mcs_trylock(lock, instance);
+}
+void mcs_unlock(mcslock_t* lock, mcsnode_t* instance)
+{
+	__mcs_unlock(lock, instance);
+}
+
+/*non inlined versions of nodeless mcs lock*/
+void nmcs_lock(nmcslock_t* lock)
+{
+	__nmcs_lock(lock);
+}
+
+int nmcs_trylock(nmcslock_t* lock)
+{
+	return __nmcs_trylock(lock);
+}
+
+void nmcs_unlock(nmcslock_t* lock)
+{
+	__nmcs_unlock(lock);
+}