* [Linux-ia64] doh! NMCS patches attached.
@ 2002-02-15 21:03 john stultz
0 siblings, 0 replies; only message in thread
From: john stultz @ 2002-02-15 21:03 UTC (permalink / raw)
To: linux-ia64
[-- Attachment #1: Type: text/plain, Size: 15 bytes --]
<smack>
-john
[-- Attachment #2: mcslock-2.4.17-C1.patch --]
[-- Type: text/plain, Size: 6669 bytes --]
Index: linux24/include/asm-ia64/processor.h
diff -u linux24/include/asm-ia64/processor.h:1.1.1.1 linux24/include/asm-ia64/processor.h:1.1.1.1.20.1
--- linux24/include/asm-ia64/processor.h:1.1.1.1 Tue Dec 18 15:49:04 2001
+++ linux24/include/asm-ia64/processor.h Fri Feb 15 09:56:30 2002
@@ -547,6 +547,8 @@
extern void ia64_load_pm_regs (struct task_struct *task);
#endif
+#define rep_nop() /*not an ia64 op*/
+
#define ia64_fph_enable() asm volatile (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
#define ia64_fph_disable() asm volatile (";; ssm psr.dfh;; srlz.d;;" ::: "memory");
Index: linux24/include/linux/mcslock.h
diff -u /dev/null linux24/include/linux/mcslock.h:1.1.4.11
--- /dev/null Fri Feb 15 12:15:44 2002
+++ linux24/include/linux/mcslock.h Fri Feb 15 09:54:13 2002
@@ -0,0 +1,158 @@
+/*
+ MCS and Nodeless MCS Lock implementations.
+ John Stultz (johnstul@us.ibm.com)
+
+ New Nodeless MCS Lock originally designed for K42 by
+ Marc Auslander, David Edelsohn, Orran Y Krieger,
+ Bryan S Rosenburg, and Robert W Wisniewski.
+ http://www.research.ibm.com/K42/
+
+*/
+#ifndef _MCS_LOCKS_H
+#define _MCS_LOCKS_H
+
+#include <linux/types.h>
+#include <asm/bitops.h>
+#include <asm/system.h>
+#include <asm/smp.h>
+#include <asm/processor.h>
+
+/*==[MCS Lock]=========================================================*/
+struct mcsnode_t { /* per-acquirer queue node for the MCS lock */
+ struct mcsnode_t* volatile next; /* successor waiting behind this node (NULL = none linked yet) */
+ volatile int flag; /* local spin flag: 1 while waiting, cleared by predecessor on handoff */
+};
+typedef struct mcsnode_t* volatile mcslock_t; /* the lock word: points at the queue tail, NULL = unlocked */
+typedef struct mcsnode_t mcsnode_t;
+
+#define MCS_LOCK_UNLOCKED (mcslock_t) NULL
+#define MCS_NODE_UNLOCKED (mcsnode_t) {NULL, 0}
+
+#define mcs_lock_init(x) do { *(x) = MCS_LOCK_UNLOCKED; } while(0)
+#define mcs_is_locked(x) ((*(x))!=NULL) /* fixed: old form (*(mcsnode_t*)(x)) dereferenced to a struct, which cannot compare !=NULL */
+#define mcs_unlock_wait(x) do {barrier();} while(mcs_is_locked(x))
+
+#define mcs_lock_irq(x,y) /* acquire lock x with node y, local irqs disabled */ \
+ do { local_irq_disable(); mcs_lock(x,y); } while(0)
+#define mcs_unlock_irq(x,y) /* release lock x (node y), then re-enable irqs */ \
+ do { mcs_unlock(x,y); local_irq_enable(); } while(0)
+#define mcs_lock_irqsave(x,y,flags) /* save irq state in flags, disable, acquire */ \
+ do { local_irq_save(flags); mcs_lock(x,y); } while(0)
+#define mcs_unlock_irqrestore(x,y,flags) /* release, then restore saved irq state */ \
+ do { mcs_unlock(x,y); local_irq_restore(flags); } while(0)
+
+#ifndef CONFIG_SMP
+/*UP nops*/
+#define mcs_lock(x,y) (void*)(y) /* evaluates arg, presumably to silence unused warnings -- no locking on UP */
+#define mcs_trylock(x,y) (1) /* always succeeds on UP */
+#define mcs_unlock(x,y) (void*)(x)
+
+#else /*!CONFIG_SMP*/
+
+extern void mcs_lock(mcslock_t* lock, mcsnode_t* instance); /* out-of-line version, defined in lib/mcslock.c */
+static inline void __mcs_lock(mcslock_t* lock, mcsnode_t* instance) /* acquire: append our node and spin locally */
+{
+ mcsnode_t* before;
+ instance->next = NULL; /* we will be the new tail */
+ before = xchg((mcsnode_t**)lock,instance); /* atomically swap ourselves in as tail; returns old tail */
+ if (before != NULL) { /* lock held: queue behind the previous tail */
+ instance->flag = 1; /* NOTE(review): no barrier between this store and the link below -- confirm ordering on weakly-ordered CPUs (e.g. ia64) */
+ before->next = instance; /* publish ourselves to the predecessor */
+ while(instance->flag){rep_nop();} /* spin on our own node until predecessor hands off */
+ }
+}
+
+extern int mcs_trylock(mcslock_t* lock, mcsnode_t* instance); /* out-of-line version, defined in lib/mcslock.c */
+static inline int __mcs_trylock(mcslock_t* lock, mcsnode_t* instance) /* returns 1 if acquired, 0 if contended */
+{
+ if(NULL == cmpxchg(lock,NULL,instance)) /* only succeeds when the queue is empty */
+ return 1;
+ return 0;
+}
+
+extern void mcs_unlock(mcslock_t* lock, mcsnode_t* instance); /* out-of-line version, defined in lib/mcslock.c */
+static inline void __mcs_unlock(mcslock_t* lock, mcsnode_t* instance) /* release: hand off to successor, or clear the lock */
+{
+ if(instance->next == NULL) { /* no successor visible yet */
+ if(instance == cmpxchg(lock,instance,NULL)) /* still the tail: lock is now free */
+ return;
+ while(instance->next == NULL){rep_nop();} /* a waiter is mid-enqueue; wait for it to link itself */
+ }
+ wmb(); /* order critical-section stores before the handoff below */
+ instance->next->flag = 0; /* release the successor's local spin */
+}
+#endif /*!CONFIG_SMP*/
+
+
+/*==[Nodeless MCS Lock]===============================================*/
+
+struct nmcslock_t{ /* "nodeless" MCS lock: the holder's queue node lives in the lock itself */
+ mcsnode_t holder; /* node representing the current lock holder */
+ mcslock_t list; /* tail of the waiter queue (NULL = unlocked) */
+};
+
+typedef struct nmcslock_t nmcslock_t;
+
+#define NMCS_LOCK_UNLOCKED (nmcslock_t) {MCS_NODE_UNLOCKED,MCS_LOCK_UNLOCKED}
+
+#define nmcs_lock_init(x) do { *(x) = NMCS_LOCK_UNLOCKED; } while(0)
+#define nmcs_is_locked(x) ((x)->list != 0) /* non-NULL tail means someone holds or is queuing for the lock */
+#define nmcs_unlock_wait(x) do {barrier();} while(nmcs_is_locked(x))
+
+
+#ifndef CONFIG_SMP
+/*UP nops*/
+#define nmcs_lock(x) (void*)(x) /* evaluates arg, presumably to silence unused warnings -- no locking on UP */
+#define nmcs_trylock(x) (1) /* always succeeds on UP */
+#define nmcs_unlock(x) (void*)(x)
+
+#else /*!CONFIG_SMP*/
+
+extern void nmcs_lock(nmcslock_t* lock); /* out-of-line version, defined in lib/mcslock.c */
+static inline void __nmcs_lock(nmcslock_t* lock) /* acquire without a caller-supplied node (K42 "nodeless" scheme) */
+{
+ while(1){
+ if(NULL == cmpxchg(&lock->list, NULL, &lock->holder)){ /* uncontended: install the holder node as tail */
+ return;
+ }else{
+ mcsnode_t instance = MCS_NODE_UNLOCKED; /* on-stack node: safe since we don't return until handoff completes */
+ mcsnode_t* before = (mcsnode_t*)lock->list; /* snapshot the current tail */
+ if(before && (before == cmpxchg(&lock->list, before, &instance))) /* enqueue the stack node behind it */
+ {
+ instance.flag = 1; /* NOTE(review): no barrier before the link below -- confirm ordering on weakly-ordered CPUs */
+ before->next = &instance; /* publish the stack node to the predecessor */
+ while(instance.flag){rep_nop();} /* spin until the predecessor hands off */
+ lock->holder.next = NULL; /* we own the lock: replace stack node with the holder node */
+ if(&instance != cmpxchg(&lock->list, &instance, &lock->holder)){ /* someone queued behind the stack node */
+ while(!instance.next){rep_nop();} /* wait for them to finish linking */
+ lock->holder.next = instance.next; /* migrate the successor onto the holder node */
+ }
+ return;
+ }
+ }
+ /*whoops! the list changed on us. try again*/
+ }
+}
+
+extern int nmcs_trylock(nmcslock_t* lock); /* out-of-line version, defined in lib/mcslock.c */
+static inline int __nmcs_trylock(nmcslock_t* lock) /* returns 1 if acquired, 0 if contended */
+{
+ if(NULL == cmpxchg(&lock->list, NULL, &lock->holder)) /* only succeeds when the queue is empty */
+ return 1;
+ return 0;
+}
+
+extern void nmcs_unlock(nmcslock_t* lock); /* out-of-line version, defined in lib/mcslock.c */
+static inline void __nmcs_unlock(nmcslock_t* lock) /* release: hand off to successor, or clear the lock */
+{
+ wmb(); /* order critical-section stores before releasing */
+ if(&lock->holder != cmpxchg(&lock->list, &lock->holder, NULL)) /* tail moved: a waiter exists */
+ {
+ while(lock->holder.next == NULL){rep_nop();} /* waiter may still be mid-link; wait for it */
+ lock->holder.next->flag = 0; /* release the successor's local spin */
+ }
+}
+
+#endif /*!CONFIG_SMP*/
+
+#endif /*!_MCS_LOCKS_H*/
Index: linux24/lib/Makefile
diff -u linux24/lib/Makefile:1.1.1.1 linux24/lib/Makefile:1.1.1.1.18.1
--- linux24/lib/Makefile:1.1.1.1 Tue Dec 18 15:49:00 2001
+++ linux24/lib/Makefile Tue Feb 12 14:15:38 2002
@@ -10,7 +10,7 @@
export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o
-obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o bust_spinlocks.o rbtree.o
+obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o bust_spinlocks.o rbtree.o mcslock.o
obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
Index: linux24/lib/mcslock.c
diff -u /dev/null linux24/lib/mcslock.c:1.1.4.1
--- /dev/null Fri Feb 15 12:15:44 2002
+++ linux24/lib/mcslock.c Tue Feb 12 14:15:38 2002
@@ -0,0 +1,31 @@
+#include <linux/mcslock.h>
+
+/*non inlined versions of mcs lock*/
+void mcs_lock(mcslock_t* lock, mcsnode_t* instance) /* exported out-of-line entry points wrapping the inline __mcs_* forms */
+{
+ __mcs_lock(lock, instance);
+}
+int mcs_trylock(mcslock_t* lock, mcsnode_t* instance)
+{
+ return __mcs_trylock(lock, instance);
+}
+void mcs_unlock(mcslock_t* lock, mcsnode_t* instance)
+{
+ __mcs_unlock(lock, instance);
+}
+
+/*non inlined versions of nodeless mcs lock*/
+void nmcs_lock(nmcslock_t* lock) /* exported out-of-line entry points wrapping the inline __nmcs_* forms */
+{
+ __nmcs_lock(lock);
+}
+
+int nmcs_trylock(nmcslock_t* lock)
+{
+ return __nmcs_trylock(lock);
+}
+
+void nmcs_unlock(nmcslock_t* lock)
+{
+ __nmcs_unlock(lock);
+}
[-- Attachment #3: spinlocks-replaced-2.4.17-C1.patch --]
[-- Type: text/plain, Size: 2211 bytes --]
Index: linux24/include/asm-i386/spinlock.h
diff -u linux24/include/asm-i386/spinlock.h:1.1.1.1 linux24/include/asm-i386/spinlock.h:1.1.1.1.22.1
--- linux24/include/asm-i386/spinlock.h:1.1.1.1 Tue Dec 18 15:49:01 2001
+++ linux24/include/asm-i386/spinlock.h Fri Jan 25 11:57:13 2002
@@ -23,6 +23,8 @@
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*/
+#define USE_NMCS_LOCK
+#ifndef USE_NMCS_LOCK
typedef struct {
volatile unsigned int lock;
#if SPINLOCK_DEBUG
@@ -137,7 +139,20 @@
spin_lock_string
:"=m" (lock->lock) : : "memory");
}
+#else /*!USE_NMCS_LOCK*/
+#include <linux/mcslock.h>
+#define spinlock_t nmcslock_t /* macro (not typedef) so existing spinlock_t declarations pick up the nmcs type */
+#define SPIN_LOCK_UNLOCKED NMCS_LOCK_UNLOCKED
+
+#define spin_lock_init(x) nmcs_lock_init(x)
+#define spin_is_locked(x) nmcs_is_locked(x)
+#define spin_unlock_wait(x) nmcs_unlock_wait(x)
+
+#define spin_lock(x) nmcs_lock(x) /* map the classic spinlock API onto the nodeless MCS lock */
+#define spin_trylock(x) nmcs_trylock(x) /* both return 1 on success */
+#define spin_unlock(x) nmcs_unlock(x)
+#endif
/*
* Read-write spinlocks, allowing multiple readers
Index: linux24/include/asm-ia64/spinlock.h
diff -u linux24/include/asm-ia64/spinlock.h:1.1.1.1 linux24/include/asm-ia64/spinlock.h:1.1.1.1.22.1
--- linux24/include/asm-ia64/spinlock.h:1.1.1.1 Tue Dec 18 15:49:04 2001
+++ linux24/include/asm-ia64/spinlock.h Tue Jan 29 15:58:02 2002
@@ -67,6 +67,8 @@
#else /* !NEW_LOCK */
+#define USE_NMCS_LOCK
+#ifndef USE_NMCS_LOCK
typedef struct {
volatile unsigned int lock;
} spinlock_t;
@@ -99,7 +101,20 @@
#define spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
#define spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
+#else /*!USE_NMCS_LOCK*/
+#include <linux/mcslock.h>
+#define spinlock_t nmcslock_t /* macro (not typedef) so existing spinlock_t declarations pick up the nmcs type */
+#define SPIN_LOCK_UNLOCKED NMCS_LOCK_UNLOCKED
+#define spin_lock_init(x) nmcs_lock_init(x)
+#define spin_is_locked(x) nmcs_is_locked(x)
+#define spin_unlock_wait(x) nmcs_unlock_wait(x)
+
+#define spin_lock(x) nmcs_lock(x) /* map the classic spinlock API onto the nodeless MCS lock */
+#define spin_trylock(x) nmcs_trylock(x) /* both return 1 on success */
+#define spin_unlock(x) nmcs_unlock(x)
+
+#endif
#endif /* !NEW_LOCK */
typedef struct {
^ permalink raw reply [flat|nested] only message in thread
only message in thread, other threads:[~2002-02-15 21:03 UTC | newest]
Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2002-02-15 21:03 [Linux-ia64] doh! NMCS patches attached john stultz
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox