public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [RFC][PATCH] rwsem: rwsem_is_{read,write}_locked
@ 2009-02-24 21:58 Peter Zijlstra
  2009-02-24 23:21 ` Peter Zijlstra
  0 siblings, 1 reply; 2+ messages in thread
From: Peter Zijlstra @ 2009-02-24 21:58 UTC (permalink / raw)
  To: lkml
  Cc: David Howells, Matthew Wilcox, Ingo Molnar, Nick Piggin,
	Christoph Hellwig, Linus Torvalds

Christoph requested an extended is_locked interface for rwsems. I think
the below ought to do, then again, I might have missed something.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 arch/alpha/include/asm/rwsem.h   |    5 -----
 arch/ia64/include/asm/rwsem.h    |    5 -----
 arch/powerpc/include/asm/rwsem.h |    5 -----
 arch/s390/include/asm/rwsem.h    |    5 -----
 arch/sh/include/asm/rwsem.h      |    5 -----
 arch/sparc/include/asm/rwsem.h   |    5 -----
 arch/x86/include/asm/rwsem.h     |    5 -----
 arch/xtensa/include/asm/rwsem.h  |    5 -----
 include/linux/rwsem-spinlock.h   |    5 +++++
 include/linux/rwsem.h            |   16 ++++++++++++++++
 10 files changed, 21 insertions(+), 40 deletions(-)

diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 1570c0b..fdc8dc3 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -250,10 +250,5 @@ static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
 #endif
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ALPHA_RWSEM_H */
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index fbee74b..4b4bfa5 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -174,9 +174,4 @@ __downgrade_write (struct rw_semaphore *sem)
 #define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
 #define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* _ASM_IA64_RWSEM_H */
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
index 24cd928..27d1acf 100644
--- a/arch/powerpc/include/asm/rwsem.h
+++ b/arch/powerpc/include/asm/rwsem.h
@@ -164,10 +164,5 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif	/* __KERNEL__ */
 #endif	/* _ASM_POWERPC_RWSEM_H */
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 9d2a179..d9ab6c6 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -378,10 +378,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 	return new;
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _S390_RWSEM_H */
diff --git a/arch/sh/include/asm/rwsem.h b/arch/sh/include/asm/rwsem.h
index 1987f3e..5aded81 100644
--- a/arch/sh/include/asm/rwsem.h
+++ b/arch/sh/include/asm/rwsem.h
@@ -179,10 +179,5 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_SH_RWSEM_H */
diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h
index 1dc129a..a3f43c9 100644
--- a/arch/sparc/include/asm/rwsem.h
+++ b/arch/sparc/include/asm/rwsem.h
@@ -74,11 +74,6 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 	atomic_add(delta, (atomic_t *)(&sem->count));
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _SPARC64_RWSEM_H */
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index ca7517d..a22dc3b 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -256,10 +256,5 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 	return tmp + delta;
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_RWSEM_H */
diff --git a/arch/xtensa/include/asm/rwsem.h b/arch/xtensa/include/asm/rwsem.h
index e39edf5..e960d99 100644
--- a/arch/xtensa/include/asm/rwsem.h
+++ b/arch/xtensa/include/asm/rwsem.h
@@ -160,9 +160,4 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif	/* _XTENSA_RWSEM_H */
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 6c3c0f6..00c76f7 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -74,5 +74,10 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 	return (sem->activity != 0);
 }
 
+static inline int rwsem_is_write_locked(struct rw_semaphore *sem)
+{
+	return (sem->activity < 0);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index efd348f..9d7531b 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -20,8 +20,24 @@ struct rw_semaphore;
 #include <linux/rwsem-spinlock.h> /* use a generic implementation */
 #else
 #include <asm/rwsem.h> /* use an arch-specific implementation */
+
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+	return (sem->count != 0);
+}
+
+static inline int rwsem_is_write_locked(struct rw_semaphore *sem)
+{
+	return (sem->count & RWSEM_ACTIVE_MASK) == RWSEM_ACTIVE_MASK;
+}
+
 #endif
 
+static inline int rwsem_is_read_locked(struct rw_semaphore *sem)
+{
+	return rwsem_is_locked(sem) && !rwsem_is_write_locked(sem);
+}
+
 /*
  * lock for reading
  */



^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [RFC][PATCH] rwsem: rwsem_is_{read,write}_locked
  2009-02-24 21:58 [RFC][PATCH] rwsem: rwsem_is_{read,write}_locked Peter Zijlstra
@ 2009-02-24 23:21 ` Peter Zijlstra
  0 siblings, 0 replies; 2+ messages in thread
From: Peter Zijlstra @ 2009-02-24 23:21 UTC (permalink / raw)
  To: lkml
  Cc: David Howells, Matthew Wilcox, Ingo Molnar, Nick Piggin,
	Christoph Hellwig, Linus Torvalds

On Tue, 2009-02-24 at 22:58 +0100, Peter Zijlstra wrote:
> Christoph requested an extended is_locked interface for rwsems. I think
> the below ought to do, then again, I might have missed something.
> 
> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
> ---

> +++ b/include/linux/rwsem.h
> @@ -20,8 +20,24 @@ struct rw_semaphore;
>  #include <linux/rwsem-spinlock.h> /* use a generic implementation */
>  #else
>  #include <asm/rwsem.h> /* use an arch-specific implementation */
> +
> +static inline int rwsem_is_locked(struct rw_semaphore *sem)
> +{
> +	return (sem->count != 0);
> +}
> +
> +static inline int rwsem_is_write_locked(struct rw_semaphore *sem)
> +{
> +	return (sem->count & RWSEM_ACTIVE_MASK) == RWSEM_ACTIVE_MASK;
> +}

OK, that's not right. Concurrent down_write() calls xadd
RWSEM_ACTIVE_WRITE_BIAS, which has a 1 in that mask, so we're going to
deviate by an unspecified amount.

That one will make it roll over into (count & MASK) >= 0, which is the
exact range readers use too; this might be a tad harder than hoped.

Will ponder in the morning.

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2009-02-24 23:22 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-02-24 21:58 [RFC][PATCH] rwsem: rwsem_is_{read,write}_locked Peter Zijlstra
2009-02-24 23:21 ` Peter Zijlstra

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox