diff -Nar -u4 2.4.1/arch/i386/kernel/semaphore.c 2.4.1-trylock/arch/i386/kernel/semaphore.c
--- 2.4.1/arch/i386/kernel/semaphore.c	Sat Nov 18 17:31:25 2000
+++ 2.4.1-trylock/arch/i386/kernel/semaphore.c	Fri Feb 16 18:13:16 2001
@@ -382,8 +382,41 @@
 	tsk->state = TASK_RUNNING;
 
 	return sem;
 }
+/* We have the bias, but we can't sleep.  We have to get rid of it
+ * as gracefully as we can.
+ *
+ * This routine does have the unfortunate side-effect that we could
+ * spin for awhile if there's a lot of contention for this lock.  If
+ * that's the case, however, then it's less likely that we would hold
+ * the bias and be running this code.
+ */
+void __up_biased(int val, struct rw_semaphore *sem)
+{
+	int count, newcount;
+repeat:
+	/* Does it look like we're racing with another contender? */
+	count = atomic_read(&sem->count);
+	newcount = count + val;
+	if (newcount < 0)
+		/* Yes: Try again. */
+		goto repeat;
+	else
+		/* No: Bump the count while no one's looking.  Did we race? */
+		if (cmpxchg((volatile int *)&sem->count, count, newcount)
+		    != count)
+			/* Yes: Try again. */
+			goto repeat;
+		else
+			/* No: Let the real waiters duke it out for the bias.
+			 * FIXME: This has the same potential stampede problem
+			 * as down_write_failed_biased().
+			 */
+			if (atomic_read(&sem->count) >= 0)
+				wake_up(&sem->wait);
+}
+
 asm(
 "
 .align 4
 .globl __rwsem_wake
diff -Nar -u4 2.4.1/include/asm-i386/atomic.h 2.4.1-trylock/include/asm-i386/atomic.h
--- 2.4.1/include/asm-i386/atomic.h	Thu Jan  4 14:50:46 2001
+++ 2.4.1-trylock/include/asm-i386/atomic.h	Fri Feb 16 18:13:16 2001
@@ -52,8 +52,21 @@
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
+#define ATOMIC_SUB_SIGN_BIT	0x1
+#define ATOMIC_SUB_CARRY_BIT	0x2
+static __inline__ int atomic_sub_sign_and_carry(int i, atomic_t *v)
+{
+	unsigned char s, c;
+
+	__asm__ __volatile__(
+		LOCK "subl %3,%0; sets %1; setc %2"
+		:"=m" (v->counter), "=qm" (s), "=qm" (c)
+		:"ir" (i), "m" (v->counter) : "memory");
+	return s | c<<1;
+}
+
 static __inline__ void atomic_inc(atomic_t *v)
 {
 	__asm__ __volatile__(
 		LOCK "incl %0"
diff -Nar -u4 2.4.1/include/asm-i386/semaphore.h 2.4.1-trylock/include/asm-i386/semaphore.h
--- 2.4.1/include/asm-i386/semaphore.h	Thu Jan  4 14:50:46 2001
+++ 2.4.1-trylock/include/asm-i386/semaphore.h	Fri Feb 16 18:13:16 2001
@@ -381,6 +381,76 @@
 #endif
 	__up_write(sem);
 }
+extern void __up_biased(int val, struct rw_semaphore *sem);
+
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+	int retval;
+#if WAITQUEUE_DEBUG
+	if (sem->__magic != (long)&sem->__magic)
+		BUG();
+#endif
+	retval = atomic_sub_sign_and_carry(1, &sem->count);
+	/* Did we get the lock? */
+	if (retval & ATOMIC_SUB_SIGN_BIT) {
+		/* No: Does someone else have the bias? */
+		if (retval & ATOMIC_SUB_CARRY_BIT)
+			/* No: Guess we have to do this the hard way. */
+			__up_biased(1, sem);
+		else
+			/* Yes: Fix the count and pretend nothing happened. */
+			__up_read(sem);
+		return 1;
+	}
+	else {
+		/* Yes: We got the lock!! */
+#if WAITQUEUE_DEBUG
+		if (sem->write_bias_granted)
+			BUG();
+		if (atomic_read(&sem->writers))
+			BUG();
+		atomic_inc(&sem->readers);
+#endif
+		return 0;
+	}
+}
+
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+	int retval;
+#if WAITQUEUE_DEBUG
+	if (sem->__magic != (long)&sem->__magic)
+		BUG();
+#endif
+	retval = atomic_sub_sign_and_carry(RW_LOCK_BIAS, &sem->count);
+	/* Did we get the lock? */
+	if (retval & ATOMIC_SUB_SIGN_BIT) {
+		/* No: Does someone else have the bias? */
+		if (retval & ATOMIC_SUB_CARRY_BIT)
+			/* No: Guess we have to do this the hard way. */
+			__up_biased(RW_LOCK_BIAS, sem);
+		else
+			/* Yes: Fix the count and pretend nothing happened. */
+			__up_write(sem);
+		return 1;
+	}
+	else {
+		/* Yes: We got the lock!! */
+#if WAITQUEUE_DEBUG
+		if (atomic_read(&sem->writers))
+			BUG();
+		if (atomic_read(&sem->readers))
+			BUG();
+		if (sem->read_bias_granted)
+			BUG();
+		if (sem->write_bias_granted)
+			BUG();
+		atomic_inc(&sem->writers);
+#endif
+		return 0;
+	}
+}
+
 
 #endif
 #endif