linux-fsdevel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [patch 1/2] kernel: introduce brlock
@ 2010-03-16 12:22 Nick Piggin
  2010-03-16 12:23 ` [patch 2/2] fs: scale vfsmount_lock Nick Piggin
  2010-03-16 19:01 ` [patch 1/2] kernel: introduce brlock Andreas Dilger
  0 siblings, 2 replies; 9+ messages in thread
From: Nick Piggin @ 2010-03-16 12:22 UTC (permalink / raw)
  To: Al Viro, Frank Mayhar, John Stultz, Andi Kleen, linux-fsdevel

This second patchset scales the vfsmount lock. When it was last posted,
you were worried about commenting of lock requirements, and impact on
the slowpath. I have added comments and also done some slowpath measurements.

--
brlock: introduce special brlocks

This patch introduces special brlocks, these can only be used as global
locks, and use some preprocessor trickery to allow us to retain a more
optimal per-cpu lock implementation. We don't bother working around
lockdep yet.

The other thing we can do in future is a really neat atomic-free
implementation like Dave M did for the old brlocks, so we might actually
be able to speed up the single-thread path for these things.

Signed-off-by: Nick Piggin <npiggin@suse.de>
---
 include/linux/brlock.h |  112 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 112 insertions(+)

Index: linux-2.6/include/linux/brlock.h
===================================================================
--- /dev/null
+++ linux-2.6/include/linux/brlock.h
@@ -0,0 +1,112 @@
+/*
+ * Specialised big-reader spinlock. Can only be declared as global variables
+ * to avoid overhead and keep things simple (and we don't want to start using
+ * these inside dynamically allocated structures).
+ *
+ * Copyright 2009, Nick Piggin, Novell Inc.
+ */
+#ifndef __LINUX_BRLOCK_H
+#define __LINUX_BRLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+
+/*
+ * SMP, non-lockdep implementation: one spinlock per CPU.  A reader
+ * takes only its own CPU's lock; a writer must take every CPU's lock
+ * (see DEFINE_BRLOCK below).  This keeps the read side cache-local at
+ * the cost of a slow write side, which is assumed to be rare.
+ */
+#if defined(CONFIG_SMP) && !defined(CONFIG_LOCKDEP)
+#define DECLARE_BRLOCK(name)						\
+ DECLARE_PER_CPU(spinlock_t, name##_lock);				\
+ extern void name##_lock_init(void);					\
+ /* Read lock: take this CPU's lock only. */				\
+ static inline void name##_rlock(void) {				\
+	spinlock_t *lock;						\
+	lock = &get_cpu_var(name##_lock);				\
+	spin_lock(lock);						\
+	/*								\
+	 * spin_lock() disables preemption, so dropping the		\
+	 * get_cpu_var() reference here is safe: we stay on this CPU	\
+	 * until name##_runlock() releases the lock.			\
+	 */								\
+	put_cpu_var(name##_lock);					\
+ }									\
+ static inline void name##_runlock(void) {				\
+	spinlock_t *lock;						\
+	/* Still on the CPU that took the lock (see name##_rlock). */	\
+	lock = &__get_cpu_var(name##_lock);				\
+	spin_unlock(lock);						\
+ }									\
+ extern void name##_wlock(void);					\
+ extern void name##_wunlock(void);					\
+ /* Decrement *a; return nonzero with the read lock held iff it hit 0. */ \
+ static inline int name##_atomic_dec_and_rlock(atomic_t *a) {		\
+	int ret;							\
+	spinlock_t *lock;						\
+	lock = &get_cpu_var(name##_lock);				\
+	ret = atomic_dec_and_lock(a, lock);				\
+	put_cpu_var(name##_lock);					\
+	return ret;							\
+ }									\
+ extern int name##_atomic_dec_and_wlock__failed(atomic_t *a);		\
+ /* Decrement *a; return nonzero with the write lock held iff it hit 0. */ \
+ static inline int name##_atomic_dec_and_wlock(atomic_t *a) {		\
+	/* Fast path: decrement without locking unless *a would hit 0. */ \
+	if (atomic_add_unless(a, -1, 1))				\
+		return 0;						\
+	return name##_atomic_dec_and_wlock__failed(a);			\
+ }
+
+#define DEFINE_BRLOCK(name)						\
+ DEFINE_PER_CPU(spinlock_t, name##_lock);				\
+ /* Initialise every possible CPU's lock; must run before any use. */	\
+ void name##_lock_init(void) {						\
+	int i;								\
+	for_each_possible_cpu(i) {					\
+		spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		spin_lock_init(lock);					\
+	}								\
+ }									\
+ /*									\
+  * Write lock: lock out readers on every CPU, in CPU-number order.	\
+  * Iterate over all possible CPUs rather than only online ones:	\
+  * a CPU coming online while the write lock is held could otherwise	\
+  * admit a new reader, and a CPU going offline between wlock and	\
+  * wunlock would leave its lock held forever.				\
+  */									\
+ void name##_wlock(void) {						\
+	int i;								\
+	for_each_possible_cpu(i) {					\
+		spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		spin_lock(lock);					\
+	}								\
+ }									\
+ void name##_wunlock(void) {						\
+	int i;								\
+	for_each_possible_cpu(i) {					\
+		spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		spin_unlock(lock);					\
+	}								\
+ }									\
+ /* Slow path of name##_atomic_dec_and_wlock(): *a may reach zero. */	\
+ int name##_atomic_dec_and_wlock__failed(atomic_t *a) {			\
+	name##_wlock();							\
+	if (!atomic_dec_and_test(a)) {					\
+		name##_wunlock();					\
+		return 0;						\
+	}								\
+	return 1;							\
+ }
+
+#else
+
+/*
+ * UP or lockdep build: fall back to a single global spinlock, so
+ * lockdep sees an ordinary lock.  Read and write operations are then
+ * identical, and everything can be inline in the header.
+ */
+#define DECLARE_BRLOCK(name)						\
+ extern spinlock_t name##_lock;						\
+ static inline void name##_lock_init(void) {				\
+	spin_lock_init(&name##_lock);					\
+ }									\
+ static inline void name##_rlock(void) {				\
+	spin_lock(&name##_lock);					\
+ }									\
+ static inline void name##_runlock(void) {				\
+	spin_unlock(&name##_lock);					\
+ }									\
+ static inline void name##_wlock(void) {				\
+	spin_lock(&name##_lock);					\
+ }									\
+ static inline void name##_wunlock(void) {				\
+	spin_unlock(&name##_lock);					\
+ }									\
+ /* With one lock there is no fast/slow split for dec-and-lock. */	\
+ static inline int name##_atomic_dec_and_rlock(atomic_t *a) {		\
+	return atomic_dec_and_lock(a, &name##_lock);			\
+ }									\
+ static inline int name##_atomic_dec_and_wlock(atomic_t *a) {		\
+	return atomic_dec_and_lock(a, &name##_lock);			\
+ }
+
+/* Lock is initialised by name##_lock_init(), not statically. */
+#define DEFINE_BRLOCK(name)						\
+ spinlock_t name##_lock
+#endif
+
+#endif

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2010-03-17 20:33 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-03-16 12:22 [patch 1/2] kernel: introduce brlock Nick Piggin
2010-03-16 12:23 ` [patch 2/2] fs: scale vfsmount_lock Nick Piggin
2010-03-16 12:28   ` Nick Piggin
2010-03-17 14:20   ` Nick Piggin
2010-03-17 20:33     ` Andreas Dilger
2010-03-16 19:01 ` [patch 1/2] kernel: introduce brlock Andreas Dilger
2010-03-16 20:12   ` Frank Mayhar
2010-03-16 23:44   ` Nick Piggin
2010-03-17 14:18     ` Nick Piggin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).