diff -urN linux-2.5.20/arch/alpha/kernel/smc37c669.c linux/arch/alpha/kernel/smc37c669.c
--- linux-2.5.20/arch/alpha/kernel/smc37c669.c	2002-06-03 03:44:51.000000000 +0200
+++ linux/arch/alpha/kernel/smc37c669.c	2002-06-09 04:58:37.000000000 +0200
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include <linux/spinlock.h>
 #include
 #include
 
@@ -1103,66 +1104,7 @@
     unsigned int drq
 );
 
-#if 0
-/*
-** External Data Declarations
-*/
-
-extern struct LOCK spl_atomic;
-
-/*
-** External Function Prototype Declarations
-*/
-
-/* From kernel_alpha.mar */
-extern spinlock(
-    struct LOCK *spl
-);
-
-extern spinunlock(
-    struct LOCK *spl
-);
-
-/* From filesys.c */
-int allocinode(
-    char *name,
-    int can_create,
-    struct INODE **ipp
-);
-
-extern int null_procedure( void );
-
-int smcc669_init( void );
-int smcc669_open( struct FILE *fp, char *info, char *next, char *mode );
-int smcc669_read( struct FILE *fp, int size, int number, unsigned char *buf );
-int smcc669_write( struct FILE *fp, int size, int number, unsigned char *buf );
-int smcc669_close( struct FILE *fp );
-
-struct DDB smc_ddb = {
-	"smc",			/* how this routine wants to be called */
-	smcc669_read,		/* read routine */
-	smcc669_write,		/* write routine */
-	smcc669_open,		/* open routine */
-	smcc669_close,		/* close routine */
-	null_procedure,		/* name expansion routine */
-	null_procedure,		/* delete routine */
-	null_procedure,		/* create routine */
-	null_procedure,		/* setmode */
-	null_procedure,		/* validation routine */
-	0,			/* class specific use */
-	1,			/* allows information */
-	0,			/* must be stacked */
-	0,			/* is a flash update driver */
-	0,			/* is a block device */
-	0,			/* not seekable */
-	0,			/* is an Ethernet device */
-	0,			/* is a filesystem driver */
-};
-#endif
-
-#define spinlock(x)
-#define spinunlock(x)
-
+static spinlock_t smc_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
 
 /*
 **++
@@ -2042,10 +1984,10 @@
 **  mode.  Therefore, a spinlock is placed around the two writes to
 **  guarantee that they complete uninterrupted.
 */
-	spinlock( &spl_atomic );
+	spin_lock(&smc_lock);
 	wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY );
 	wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY );
-	spinunlock( &spl_atomic );
+	spin_unlock(&smc_lock);
     }
     else {
 	wb( &SMC37c669->index_port, SMC37c669_CONFIG_OFF_KEY );
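(Aside, not part of the patch: spin_lock() excludes other CPUs but leaves
local interrupts enabled; here the driver only needs the two key writes to
reach the chip back-to-back.  If an interrupt handler could also touch the
index port, the usual 2.5-era form would be the irqsave variant, e.g.:

	unsigned long flags;

	spin_lock_irqsave(&smc_lock, flags);
	wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY );
	wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY );
	spin_unlock_irqrestore(&smc_lock, flags);
)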
diff -urN linux-2.5.20/arch/ppc/kernel/ppc_ksyms.c linux/arch/ppc/kernel/ppc_ksyms.c
--- linux-2.5.20/arch/ppc/kernel/ppc_ksyms.c	2002-06-03 03:44:53.000000000 +0200
+++ linux/arch/ppc/kernel/ppc_ksyms.c	2002-06-09 04:58:37.000000000 +0200
@@ -217,7 +217,7 @@
 EXPORT_SYMBOL(__global_sti);
 EXPORT_SYMBOL(__global_save_flags);
 EXPORT_SYMBOL(__global_restore_flags);
-#ifdef SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 EXPORT_SYMBOL(_raw_spin_lock);
 EXPORT_SYMBOL(_raw_spin_unlock);
 EXPORT_SYMBOL(_raw_spin_trylock);
diff -urN linux-2.5.20/arch/ppc/lib/locks.c linux/arch/ppc/lib/locks.c
--- linux-2.5.20/arch/ppc/lib/locks.c	2002-06-03 03:44:44.000000000 +0200
+++ linux/arch/ppc/lib/locks.c	2002-06-09 04:58:37.000000000 +0200
@@ -16,7 +16,7 @@
 #include
 #include
 
-#ifdef SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 
 #undef INIT_STUCK
 #define INIT_STUCK 200000000 /*0xffffffff*/
diff -urN linux-2.5.20/drivers/scsi/tmscsim.c linux/drivers/scsi/tmscsim.c
--- linux-2.5.20/drivers/scsi/tmscsim.c	2002-06-03 03:44:50.000000000 +0200
+++ linux/drivers/scsi/tmscsim.c	2002-06-09 04:58:38.000000000 +0200
@@ -254,8 +254,6 @@
  * undef  : traditional save_flags; cli; restore_flags;
  */
 
-//#define DEBUG_SPINLOCKS 2	/* Set to 0, 1 or 2 in include/linux/spinlock.h */
-
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,30)
 # include
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,30)
@@ -293,7 +291,7 @@
 
 # if USE_SPINLOCKS == 3 /* both */
 
-#  if defined (CONFIG_SMP) || DEBUG_SPINLOCKS > 0
+#  if defined (CONFIG_SMP)
 #   define DC390_LOCKA_INIT { spinlock_t __unlocked = SPIN_LOCK_UNLOCKED; pACB->lock = __unlocked; };
 #  else
 #   define DC390_LOCKA_INIT
@@ -322,7 +320,7 @@
 
 # if USE_SPINLOCKS == 2 /* adapter specific locks */
 
-#  if defined (CONFIG_SMP) || DEBUG_SPINLOCKS > 0
+#  if defined (CONFIG_SMP)
 #   define DC390_LOCKA_INIT { spinlock_t __unlocked = SPIN_LOCK_UNLOCKED; pACB->lock = __unlocked; };
 #  else
 #   define DC390_LOCKA_INIT
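(Aside, not part of the patch: CONFIG_DEBUG_SPINLOCK comes straight from
Kconfig, which #defines the symbol only when the option is enabled --
hence the switch from "#if SPINLOCK_DEBUG", a derived 0/1 macro, to a
plain #ifdef.  Sketch of the resulting pattern, helper name hypothetical:

	#ifdef CONFIG_DEBUG_SPINLOCK
	# define MY_LOCK_CHECK(l)	do { if (!spin_is_locked(l)) BUG(); } while (0)
	#else
	# define MY_LOCK_CHECK(l)	do { } while (0)
	#endif
)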
diff -urN linux-2.5.20/include/asm-cris/locks.h linux/include/asm-cris/locks.h
--- linux-2.5.20/include/asm-cris/locks.h	2002-06-03 03:44:48.000000000 +0200
+++ linux/include/asm-cris/locks.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,133 +0,0 @@
-/*
- * SMP locks primitives for building ix86 locks
- * (not yet used).
- *
- *		Alan Cox, alan@cymru.net, 1995
- */
-
-/*
- *	This would be much easier but far less clear and easy
- *	to borrow for other processors if it was just assembler.
- */
-
-extern __inline__ void prim_spin_lock(struct spinlock *sp)
-{
-	int processor=smp_processor_id();
-
-	/*
-	 *	Grab the lock bit
-	 */
-
-	while(lock_set_bit(0,&sp->lock))
-	{
-		/*
-		 *	Failed, but that's cos we own it!
-		 */
-
-		if(sp->cpu==processor)
-		{
-			sp->users++;
-			return 0;
-		}
-
-		/*
-		 *	Spin in the cache S state if possible
-		 */
-		while(sp->lock)
-		{
-			/*
-			 *	Wait for any invalidates to go off
-			 */
-
-			if(smp_invalidate_needed&(1<<processor))
-				local_flush_tlb();
-			sp->spins++;
-		}
-		/*
-		 *	Someone wrote the line, we go 'I' and get
-		 *	the cache entry. Now try to regrab
-		 */
-	}
-	sp->users++;sp->cpu=processor;
-	return 1;
-}
-
-/*
- *	Release a spin lock
- */
-
-extern __inline__ int prim_spin_unlock(struct spinlock *sp)
-{
-	/* This is safe. The decrement is still guarded by the lock. A multilock would
-	   not be safe this way */
-	if(!--sp->users)
-	{
-		lock_clear_bit(0,&sp->lock);sp->cpu= NO_PROC_ID;
-		return 1;
-	}
-	return 0;
-}
-
-
-/*
- *	Non blocking lock grab
- */
-
-extern __inline__ int prim_spin_lock_nb(struct spinlock *sp)
-{
-	if(lock_set_bit(0,&sp->lock))
-		return 0;		/* Locked already */
-	sp->users++;
-	return 1;			/* We got the lock */
-}
-
-
-/*
- *	These wrap the locking primitives up for usage
- */
-
-extern __inline__ void spinlock(struct spinlock *sp)
-{
-	if(sp->priority<current->lock_order)
-		panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
-	if(prim_spin_lock(sp))
-	{
-		/*
-		 *	We got a new lock. Update the priority chain
-		 */
-		sp->oldpri=current->lock_order;
-		current->lock_order=sp->priority;
-	}
-}
-
-extern __inline__ void spinunlock(struct spinlock *sp)
-{
-	if(current->lock_order!=sp->priority)
-		panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
-	if(prim_spin_unlock(sp))
-	{
-		/*
-		 *	Update the debugging lock priority chain. We dumped
-		 *	our last right to the lock.
-		 */
-		current->lock_order=sp->oldpri;
-	}
-}
-
-extern __inline__ void spintestlock(struct spinlock *sp)
-{
-	/*
-	 *	We do no sanity checks, it's legal to optimistically
-	 *	get a lower lock.
-	 */
-	prim_spin_lock_nb(sp);
-}
-
-extern __inline__ void spintestunlock(struct spinlock *sp)
-{
-	/*
-	 *	A testlock doesn't update the lock chain so we
-	 *	must not update it on free
-	 */
-	prim_spin_unlock(sp);
-}
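(Aside, not part of the patch: the deleted helpers sketched two ideas that
were never wired up -- recursive ownership via sp->users/sp->cpu, and a
panic-on-lock-order check of sp->priority against current->lock_order.
A minimal sketch of that ordering rule, all names hypothetical:

	struct ordered_lock {
		spinlock_t	lock;
		int		priority;	/* take in increasing order */
	};

	static void ordered_lock_acquire(struct ordered_lock *l, int *held)
	{
		if (l->priority < *held)	/* would invert the order */
			panic("lock order violation (%d after %d)\n",
			      l->priority, *held);
		spin_lock(&l->lock);
		*held = l->priority;
	}
)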
diff -urN linux-2.5.20/include/asm-i386/locks.h linux/include/asm-i386/locks.h
--- linux-2.5.20/include/asm-i386/locks.h	2002-06-03 03:44:51.000000000 +0200
+++ linux/include/asm-i386/locks.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,135 +0,0 @@
-/*
- * SMP locks primitives for building ix86 locks
- * (not yet used).
- *
- *		Alan Cox, alan@redhat.com, 1995
- */
-
-/*
- *	This would be much easier but far less clear and easy
- *	to borrow for other processors if it was just assembler.
- */
-
-static __inline__ void prim_spin_lock(struct spinlock *sp)
-{
-	int processor=smp_processor_id();
-
-	/*
-	 *	Grab the lock bit
-	 */
-
-	while(lock_set_bit(0,&sp->lock))
-	{
-		/*
-		 *	Failed, but that's cos we own it!
-		 */
-
-		if(sp->cpu==processor)
-		{
-			sp->users++;
-			return 0;
-		}
-
-		/*
-		 *	Spin in the cache S state if possible
-		 */
-		while(sp->lock)
-		{
-			/*
-			 *	Wait for any invalidates to go off
-			 */
-
-			if(smp_invalidate_needed&(1<<processor))
-				local_flush_tlb();
-			sp->spins++;
-		}
-		/*
-		 *	Someone wrote the line, we go 'I' and get
-		 *	the cache entry. Now try to regrab
-		 */
-	}
-	sp->users++;sp->cpu=processor;
-	return 1;
-}
-
-/*
- *	Release a spin lock
- */
-
-static __inline__ int prim_spin_unlock(struct spinlock *sp)
-{
-	/* This is safe. The decrement is still guarded by the lock. A multilock would
-	   not be safe this way */
-	if(!--sp->users)
-	{
-		sp->cpu= NO_PROC_ID;lock_clear_bit(0,&sp->lock);
-		return 1;
-	}
-	return 0;
-}
-
-
-/*
- *	Non blocking lock grab
- */
-
-static __inline__ int prim_spin_lock_nb(struct spinlock *sp)
-{
-	if(lock_set_bit(0,&sp->lock))
-		return 0;		/* Locked already */
-	sp->users++;
-	return 1;			/* We got the lock */
-}
-
-
-/*
- *	These wrap the locking primitives up for usage
- */
-
-static __inline__ void spinlock(struct spinlock *sp)
-{
-	if(sp->priority<current->lock_order)
-		panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
-	if(prim_spin_lock(sp))
-	{
-		/*
-		 *	We got a new lock. Update the priority chain
-		 */
-		sp->oldpri=current->lock_order;
-		current->lock_order=sp->priority;
-	}
-}
-
-static __inline__ void spinunlock(struct spinlock *sp)
-{
-	int pri;
-	if(current->lock_order!=sp->priority)
-		panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
-	pri=sp->oldpri;
-	if(prim_spin_unlock(sp))
-	{
-		/*
-		 *	Update the debugging lock priority chain. We dumped
-		 *	our last right to the lock.
-		 */
-		current->lock_order=sp->pri;
-	}
-}
-
-static __inline__ void spintestlock(struct spinlock *sp)
-{
-	/*
-	 *	We do no sanity checks, it's legal to optimistically
-	 *	get a lower lock.
-	 */
-	prim_spin_lock_nb(sp);
-}
-
-static __inline__ void spintestunlock(struct spinlock *sp)
-{
-	/*
-	 *	A testlock doesn't update the lock chain so we
-	 *	must not update it on free
-	 */
-	prim_spin_unlock(sp);
-}
diff -urN linux-2.5.20/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
--- linux-2.5.20/include/asm-i386/spinlock.h	2002-06-03 03:44:44.000000000 +0200
+++ linux/include/asm-i386/spinlock.h	2002-06-09 04:58:38.000000000 +0200
@@ -9,30 +9,20 @@
 extern int printk(const char * fmt, ...)
 	__attribute__ ((format (printf, 1, 2)));
 
-/* It seems that people are forgetting to
- * initialize their spinlocks properly, tsk tsk.
- * Remember to turn this off in 2.4. -ben
- */
-#if defined(CONFIG_DEBUG_SPINLOCK)
-#define SPINLOCK_DEBUG	1
-#else
-#define SPINLOCK_DEBUG	0
-#endif
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
 typedef struct {
 	volatile unsigned int lock;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned magic;
 #endif
 } spinlock_t;
 
 #define SPINLOCK_MAGIC	0xdead4ead
 
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 #define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
 #else
 #define SPINLOCK_MAGIC_INIT	/* */
@@ -79,7 +69,7 @@
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (lock->magic != SPINLOCK_MAGIC)
 		BUG();
 	if (!spin_is_locked(lock))
@@ -100,7 +90,7 @@
 static inline void _raw_spin_unlock(spinlock_t *lock)
 {
 	char oldval = 1;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (lock->magic != SPINLOCK_MAGIC)
 		BUG();
 	if (!spin_is_locked(lock))
@@ -125,7 +115,7 @@
 
 static inline void _raw_spin_lock(spinlock_t *lock)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	__label__ here;
 here:
 	if (lock->magic != SPINLOCK_MAGIC) {
@@ -151,14 +141,14 @@
  */
 typedef struct {
 	volatile unsigned int lock;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned magic;
 #endif
 } rwlock_t;
 
 #define RWLOCK_MAGIC	0xdeaf1eed
 
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 #define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
 #else
 #define RWLOCK_MAGIC_INIT	/* */
@@ -181,7 +171,7 @@
 
 static inline void _raw_read_lock(rwlock_t *rw)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (rw->magic != RWLOCK_MAGIC)
 		BUG();
 #endif
@@ -190,7 +180,7 @@
 
 static inline void _raw_write_lock(rwlock_t *rw)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (rw->magic != RWLOCK_MAGIC)
 		BUG();
 #endif
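(Aside, not part of the patch: the debug-only magic word is what catches
locks that were never initialized.  SPIN_LOCK_UNLOCKED plants
SPINLOCK_MAGIC, and the debug _raw_ ops verify it, so for example:

	spinlock_t good = SPIN_LOCK_UNLOCKED;	/* magic == 0xdead4ead */
	spinlock_t bad;				/* stack garbage, magic unset */

	spin_lock(&good);	/* magic check passes */
	spin_lock(&bad);	/* debug kernel complains about bad magic */
)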
diff -urN linux-2.5.20/include/asm-ppc/spinlock.h linux/include/asm-ppc/spinlock.h
--- linux-2.5.20/include/asm-ppc/spinlock.h	2002-06-03 03:44:47.000000000 +0200
+++ linux/include/asm-ppc/spinlock.h	2002-06-09 04:58:38.000000000 +0200
@@ -7,22 +7,20 @@
 #include
 #include
 
-#undef SPINLOCK_DEBUG
-
 /*
  * Simple spin lock operations.
 */
 
 typedef struct {
 	volatile unsigned long lock;
-#ifdef SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	volatile unsigned long owner_pc;
 	volatile unsigned long owner_cpu;
 #endif
 } spinlock_t;
 
 #ifdef __KERNEL__
-#if SPINLOCK_DEBUG
+#if CONFIG_DEBUG_SPINLOCK
 #define SPINLOCK_DEBUG_INIT	, 0, 0
 #else
 #define SPINLOCK_DEBUG_INIT	/* */
@@ -34,7 +32,7 @@
 #define spin_is_locked(x)	((x)->lock != 0)
 #define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
 
-#ifndef SPINLOCK_DEBUG
+#ifndef CONFIG_DEBUG_SPINLOCK
 
 static inline void _raw_spin_lock(spinlock_t *lock)
 {
@@ -88,12 +86,12 @@
  */
 typedef struct {
 	volatile unsigned long lock;
-#ifdef SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	volatile unsigned long owner_pc;
 #endif
 } rwlock_t;
 
-#if SPINLOCK_DEBUG
+#if CONFIG_DEBUG_SPINLOCK
 #define RWLOCK_DEBUG_INIT	, 0
 #else
 #define RWLOCK_DEBUG_INIT	/* */
@@ -102,7 +100,7 @@
 #define RW_LOCK_UNLOCKED (rwlock_t) { 0 RWLOCK_DEBUG_INIT }
 #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
 
-#ifndef SPINLOCK_DEBUG
+#ifndef CONFIG_DEBUG_SPINLOCK
 
 static __inline__ void _raw_read_lock(rwlock_t *rw)
 {
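(Aside, not part of the patch: the two "+#if CONFIG_DEBUG_SPINLOCK" lines
above keep #if where every other hunk uses #ifdef.  Both behave the same
here: Kconfig defines the symbol as 1 when the option is on, and the C
preprocessor evaluates an undefined identifier in #if as 0, so

	#if CONFIG_DEBUG_SPINLOCK	/* 1 if enabled, 0 if absent */
	#define SPINLOCK_DEBUG_INIT	, 0, 0
	#else
	#define SPINLOCK_DEBUG_INIT	/* */
	#endif

still selects the right branch, though #ifdef would be more consistent.)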
diff -urN linux-2.5.20/include/asm-x86_64/locks.h linux/include/asm-x86_64/locks.h
--- linux-2.5.20/include/asm-x86_64/locks.h	2002-06-03 03:44:40.000000000 +0200
+++ linux/include/asm-x86_64/locks.h	1970-01-01 01:00:00.000000000 +0100
@@ -1,135 +0,0 @@
-/*
- * SMP locks primitives for building ix86 locks
- * (not yet used).
- *
- *		Alan Cox, alan@redhat.com, 1995
- */
-
-/*
- *	This would be much easier but far less clear and easy
- *	to borrow for other processors if it was just assembler.
- */
-
-extern __inline__ void prim_spin_lock(struct spinlock *sp)
-{
-	int processor=smp_processor_id();
-
-	/*
-	 *	Grab the lock bit
-	 */
-
-	while(lock_set_bit(0,&sp->lock))
-	{
-		/*
-		 *	Failed, but that's cos we own it!
-		 */
-
-		if(sp->cpu==processor)
-		{
-			sp->users++;
-			return 0;
-		}
-
-		/*
-		 *	Spin in the cache S state if possible
-		 */
-		while(sp->lock)
-		{
-			/*
-			 *	Wait for any invalidates to go off
-			 */
-
-			if(smp_invalidate_needed&(1<<processor))
-				local_flush_tlb();
-			sp->spins++;
-		}
-		/*
-		 *	Someone wrote the line, we go 'I' and get
-		 *	the cache entry. Now try to regrab
-		 */
-	}
-	sp->users++;sp->cpu=processor;
-	return 1;
-}
-
-/*
- *	Release a spin lock
- */
-
-extern __inline__ int prim_spin_unlock(struct spinlock *sp)
-{
-	/* This is safe. The decrement is still guarded by the lock. A multilock would
-	   not be safe this way */
-	if(!--sp->users)
-	{
-		sp->cpu= NO_PROC_ID;lock_clear_bit(0,&sp->lock);
-		return 1;
-	}
-	return 0;
-}
-
-
-/*
- *	Non blocking lock grab
- */
-
-extern __inline__ int prim_spin_lock_nb(struct spinlock *sp)
-{
-	if(lock_set_bit(0,&sp->lock))
-		return 0;		/* Locked already */
-	sp->users++;
-	return 1;			/* We got the lock */
-}
-
-
-/*
- *	These wrap the locking primitives up for usage
- */
-
-extern __inline__ void spinlock(struct spinlock *sp)
-{
-	if(sp->priority<current->lock_order)
-		panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
-	if(prim_spin_lock(sp))
-	{
-		/*
-		 *	We got a new lock. Update the priority chain
-		 */
-		sp->oldpri=current->lock_order;
-		current->lock_order=sp->priority;
-	}
-}
-
-extern __inline__ void spinunlock(struct spinlock *sp)
-{
-	int pri;
-	if(current->lock_order!=sp->priority)
-		panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
-	pri=sp->oldpri;
-	if(prim_spin_unlock(sp))
-	{
-		/*
-		 *	Update the debugging lock priority chain. We dumped
-		 *	our last right to the lock.
-		 */
-		current->lock_order=sp->pri;
-	}
-}
-
-extern __inline__ void spintestlock(struct spinlock *sp)
-{
-	/*
-	 *	We do no sanity checks, it's legal to optimistically
-	 *	get a lower lock.
-	 */
-	prim_spin_lock_nb(sp);
-}
-
-extern __inline__ void spintestunlock(struct spinlock *sp)
-{
-	/*
-	 *	A testlock doesn't update the lock chain so we
-	 *	must not update it on free
-	 */
-	prim_spin_unlock(sp);
-}
diff -urN linux-2.5.20/include/asm-x86_64/spinlock.h linux/include/asm-x86_64/spinlock.h
--- linux-2.5.20/include/asm-x86_64/spinlock.h	2002-06-03 03:44:53.000000000 +0200
+++ linux/include/asm-x86_64/spinlock.h	2002-06-09 04:58:38.000000000 +0200
@@ -9,30 +9,20 @@
 extern int printk(const char * fmt, ...)
 	__attribute__ ((format (printf, 1, 2)));
 
-/* It seems that people are forgetting to
- * initialize their spinlocks properly, tsk tsk.
- * Remember to turn this off in 2.4. -ben
- */
-#if defined(CONFIG_DEBUG_SPINLOCK)
-#define SPINLOCK_DEBUG	1
-#else
-#define SPINLOCK_DEBUG	0
-#endif
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
 typedef struct {
 	volatile unsigned int lock;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned magic;
 #endif
 } spinlock_t;
 
 #define SPINLOCK_MAGIC	0xdead4ead
 
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 #define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
 #else
 #define SPINLOCK_MAGIC_INIT	/* */
@@ -82,7 +72,7 @@
 
 static inline void _raw_spin_lock(spinlock_t *lock)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	__label__ here;
 here:
 	if (lock->magic != SPINLOCK_MAGIC) {
@@ -97,7 +87,7 @@
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (lock->magic != SPINLOCK_MAGIC)
 		BUG();
 	if (!spin_is_locked(lock))
@@ -120,14 +110,14 @@
  */
 typedef struct {
 	volatile unsigned int lock;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned magic;
 #endif
 } rwlock_t;
 
 #define RWLOCK_MAGIC	0xdeaf1eed
 
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 #define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
 #else
 #define RWLOCK_MAGIC_INIT	/* */
@@ -150,7 +140,7 @@
 
 extern inline void _raw_read_lock(rwlock_t *rw)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (rw->magic != RWLOCK_MAGIC)
 		BUG();
 #endif
@@ -159,7 +149,7 @@
 
 static inline void _raw_write_lock(rwlock_t *rw)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (rw->magic != RWLOCK_MAGIC)
 		BUG();
 #endif
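(Aside, not part of the patch: the *_MAGIC_INIT fragments let a single
unlocked initializer serve both configurations -- the macro splices in the
extra struct member only when it exists.  On i386/x86-64, where 1 means
unlocked, the expansion works out to something like:

	#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
	/* debug on:  { 1, SPINLOCK_MAGIC }	debug off: { 1 } */
)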
diff -urN linux-2.5.20/include/linux/spinlock.h linux/include/linux/spinlock.h
--- linux-2.5.20/include/linux/spinlock.h	2002-06-03 03:44:49.000000000 +0200
+++ linux/include/linux/spinlock.h	2002-06-09 04:58:38.000000000 +0200
@@ -62,13 +62,9 @@
 
 #elif !defined(spin_lock_init) /* !SMP and spin_lock_init not previously
 				  defined (e.g. by including asm/spinlock.h */
-#define DEBUG_SPINLOCKS	0	/* 0 == no debugging, 1 == maintain lock state, 2 == full debug */
-
-#if (DEBUG_SPINLOCKS < 1)
-
 #ifndef CONFIG_PREEMPT
-#define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
-#define ATOMIC_DEC_AND_LOCK
+# define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
+# define ATOMIC_DEC_AND_LOCK
 #endif
 
 /*
@@ -78,10 +74,10 @@
 */
 #if (__GNUC__ > 2)
   typedef struct { } spinlock_t;
-  #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+# define SPIN_LOCK_UNLOCKED (spinlock_t) { }
 #else
   typedef struct { int gcc_is_buggy; } spinlock_t;
-  #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+# define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
 #endif
 
 #define spin_lock_init(lock)	do { (void)(lock); } while(0)
@@ -91,42 +87,6 @@
 #define spin_unlock_wait(lock)	do { (void)(lock); } while(0)
 #define _raw_spin_unlock(lock)	do { (void)(lock); } while(0)
 
-#elif (DEBUG_SPINLOCKS < 2)
-
-typedef struct {
-	volatile unsigned long lock;
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
-
-#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)
-#define spin_is_locked(lock)	(test_bit(0,(lock)))
-#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))
-
-#define spin_lock(x)		do { (x)->lock = 1; } while (0)
-#define spin_unlock_wait(x)	do { } while (0)
-#define spin_unlock(x)		do { (x)->lock = 0; } while (0)
-
-#else /* (DEBUG_SPINLOCKS >= 2) */
-
-typedef struct {
-	volatile unsigned long lock;
-	volatile unsigned int babble;
-	const char *module;
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 25, __BASE_FILE__ }
-
-#include
-
-#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)
-#define spin_is_locked(lock)	(test_bit(0,(lock)))
-#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))
-
-#define spin_lock(x)		do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
-#define spin_unlock_wait(x)	do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
-#define spin_unlock(x)		do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
-
-#endif /* DEBUG_SPINLOCKS */
-
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
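(Aside, not part of the patch: with the DEBUG_SPINLOCKS levels gone, the
remaining uniprocessor special case is atomic_dec_and_lock(): when neither
SMP nor CONFIG_PREEMPT is set there is nobody to race with, so it
collapses to a bare atomic_dec_and_test().  Caller pattern, with
hypothetical names:

	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
		/* count hit zero; on SMP we now hold obj_list_lock */
		list_del(&obj->list);
		spin_unlock(&obj_list_lock);	/* no-op on UP builds */
		kfree(obj);
	}
)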