--- a/arch/powerpc/include/asm/spinlock.h	2017-11-18 20:57:47.921703650 +0100
+++ b/arch/powerpc/include/asm/spinlock.h	2017-11-19 18:51:51.630906446 +0100
@@ -182,6 +182,9 @@ static inline void arch_spin_unlock(arch
  * read-locks.
  */
 
+#define arch_read_can_lock(rw)		((rw)->lock >= 0)
+#define arch_write_can_lock(rw)	(!(rw)->lock)
+
 #ifdef CONFIG_PPC64
 #define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
 #define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
--- a/kernel/locking/spinlock.c	2017-11-18 20:57:48.974708915 +0100
+++ b/kernel/locking/spinlock.c	2017-11-19 18:26:18.526892449 +0100
@@ -35,6 +35,9 @@
  */
 #else
 
+#define raw_read_can_lock(l)	read_can_lock(l)
+#define raw_write_can_lock(l)	write_can_lock(l)
+
 /*
  * Some architectures can relax in favour of the CPU owning the lock.
  */
@@ -68,7 +71,7 @@ void __lockfunc __raw_##op##_lock(lockty
 									\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while ((lock)->break_lock)				\
+		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
 			arch_##op##_relax(&lock->raw_lock);		\
 	}								\
 	(lock)->break_lock = 0;						\
@@ -88,7 +91,7 @@ unsigned long __lockfunc __raw_##op##_lo
 									\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while ((lock)->break_lock)				\
+		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
 			arch_##op##_relax(&lock->raw_lock);		\
 	}								\
 	(lock)->break_lock = 0;						\
--- a/include/linux/spinlock_up.h	2017-11-18 20:57:38.788657985 +0100
+++ b/include/linux/spinlock_up.h	2017-11-19 18:48:19.787635450 +0100
@@ -69,4 +69,7 @@ static inline void arch_spin_unlock(arch
 
 #define arch_spin_is_contended(lock)	(((void)(lock), 0))
 
+#define arch_read_can_lock(lock)	(((void)(lock), 1))
+#define arch_write_can_lock(lock)	(((void)(lock), 1))
+
 #endif /* __LINUX_SPINLOCK_UP_H */
--- a/include/linux/spinlock.h	2017-11-18 20:57:38.693657510 +0100
+++ b/include/linux/spinlock.h	2017-11-19 18:37:36.933284464 +0100
@@ -283,6 +283,12 @@ static inline void do_raw_spin_unlock(ra
 	 1 : ({ local_irq_restore(flags); 0; }); \
 })
 
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
+
 /* Include rwlock functions */
 #include <linux/rwlock.h>
 
@@ -395,6 +401,11 @@ static __always_inline int spin_is_conte
 	return raw_spin_is_contended(&lock->rlock);
 }
 
+static __always_inline int spin_can_lock(spinlock_t *lock)
+{
+	return raw_spin_can_lock(&lock->rlock);
+}
+
 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
 
 /*
--- a/include/linux/rwlock.h	2017-11-18 20:57:38.652657305 +0100
+++ b/include/linux/rwlock.h	2017-11-19 18:19:54.045970054 +0100
@@ -59,6 +59,9 @@ do {								\
 # define do_raw_write_unlock(rwlock)	do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 #endif
 
+#define read_can_lock(rwlock)		arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock)		arch_write_can_lock(&(rwlock)->raw_lock)
+
 /*
  * Define the various rw_lock methods. Note we define these
  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
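
Note: for reference, below is a minimal hand-expanded sketch of what the patched BUILD_LOCK_OPS() loop in kernel/locking/spinlock.c comes out to for the read-lock case, assuming CONFIG_GENERIC_LOCKBREAK and CONFIG_PREEMPT are set. It is written here only to make the macro expansion and the restored raw_read_can_lock() test concrete; it is not copied verbatim from the tree and builds only in a kernel context.

/*
 * Sketch: expansion of BUILD_LOCK_OPS(read, rwlock) with this patch
 * applied (kernel-only code, abridged for illustration).
 */
#include <linux/spinlock.h>

void __lockfunc __raw_read_lock(rwlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (likely(do_raw_read_trylock(lock)))
			break;
		preempt_enable();

		if (!(lock)->break_lock)
			(lock)->break_lock = 1;
		/*
		 * Re-check whether the lock is actually held on every
		 * iteration: spinning on break_lock alone lets two
		 * contending CPUs wait on each other's flag even after
		 * the lock has been released.
		 */
		while (!raw_read_can_lock(lock) && (lock)->break_lock)
			arch_read_relax(&lock->raw_lock);
	}
	(lock)->break_lock = 0;
}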