From: npiggin@suse.de
To: Al Viro <viro@zeniv.linux.org.uk>
Cc: linux-fsdevel@vger.kernel.org, Ian Kent <raven@themaw.net>,
Linus Torvalds <torvalds@linux-foundation.org>,
linux-kernel@vger.kernel.org
Subject: [patch 4/6] brlock: introduce special brlocks
Date: Thu, 15 Oct 2009 15:40:30 +1100 [thread overview]
Message-ID: <20091015050048.777261867@suse.de> (raw)
In-Reply-To: 20091015044026.319860788@suse.de
[-- Attachment #1: kernel-introduce-brlock.patch --]
[-- Type: text/plain, Size: 3922 bytes --]
This patch introduces special brlocks; these can only be used as global
locks, and they use some preprocessor trickery to allow us to retain a more
optimal per-cpu lock implementation. We don't bother working around
lockdep yet.
Signed-off-by: Nick Piggin <npiggin@suse.de>
---
include/linux/brlock.h | 112 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 112 insertions(+)
Index: linux-2.6/include/linux/brlock.h
===================================================================
--- /dev/null
+++ linux-2.6/include/linux/brlock.h
@@ -0,0 +1,112 @@
+/*
+ * Specialised big-reader spinlock. Can only be declared as global variables
+ * to avoid overhead and keep things simple (and we don't want to start using
+ * these inside dynamically allocated structures).
+ *
+ * Copyright 2009, Nick Piggin, Novell Inc.
+ */
+#ifndef __LINUX_BRLOCK_H
+#define __LINUX_BRLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_LOCKDEP)
+#define DECLARE_BRLOCK(name) \
+ DECLARE_PER_CPU(spinlock_t, name##_lock); \
+ static inline void name##_lock_init(void) { \
+ int i; \
+ for_each_possible_cpu(i) { \
+ spinlock_t *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ spin_lock_init(lock); \
+ } \
+ } \
+ static inline void name##_rlock(void) { \
+ spinlock_t *lock; \
+ lock = &get_cpu_var(name##_lock); \
+ spin_lock(lock); \
+ } \
+ static inline void name##_runlock(void) { \
+ spinlock_t *lock; \
+ lock = &__get_cpu_var(name##_lock); \
+ spin_unlock(lock); \
+ put_cpu_var(name##_lock); \
+ } \
+ extern void name##_wlock(void); \
+ extern void name##_wunlock(void); \
+ static inline int name##_atomic_dec_and_rlock(atomic_t *a) { \
+ int ret; \
+ spinlock_t *lock; \
+ lock = &get_cpu_var(name##_lock); \
+ ret = atomic_dec_and_lock(a, lock); \
+ if (!ret) \
+ put_cpu_var(name##_lock); \
+ return ret; \
+ } \
+ extern int name##_atomic_dec_and_wlock__failed(atomic_t *a); \
+ static inline int name##_atomic_dec_and_wlock(atomic_t *a) { \
+ if (atomic_add_unless(a, -1, 1)) \
+ return 0; \
+ return name##_atomic_dec_and_wlock__failed(a); \
+ }
+
+#define DEFINE_BRLOCK(name) \
+ DEFINE_PER_CPU(spinlock_t, name##_lock); \
+ void name##_wlock(void) { \
+ int i; \
+ for_each_online_cpu(i) { \
+ spinlock_t *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ spin_lock(lock); \
+ } \
+ } \
+ void name##_wunlock(void) { \
+ int i; \
+ for_each_online_cpu(i) { \
+ spinlock_t *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ spin_unlock(lock); \
+ } \
+ } \
+ int name##_atomic_dec_and_wlock__failed(atomic_t *a) { \
+ name##_wlock(); \
+ if (!atomic_dec_and_test(a)) { \
+ name##_wunlock(); \
+ return 0; \
+ } \
+ return 1; \
+ }
+
+#else
+
+#define DECLARE_BRLOCK(name) \
+ spinlock_t name##_lock; \
+ static inline void name##_lock_init(void) { \
+ spin_lock_init(&name##_lock); \
+ } \
+ static inline void name##_rlock(void) { \
+ spin_lock(&name##_lock); \
+ } \
+ static inline void name##_runlock(void) { \
+ spin_unlock(&name##_lock); \
+ } \
+ static inline void name##_wlock(void) { \
+ spin_lock(&name##_lock); \
+ } \
+ static inline void name##_wunlock(void) { \
+ spin_unlock(&name##_lock); \
+ } \
+ static inline int name##_atomic_dec_and_rlock(atomic_t *a) { \
+ return atomic_dec_and_lock(a, &name##_lock); \
+ } \
+ static inline int name##_atomic_dec_and_wlock(atomic_t *a) { \
+ return atomic_dec_and_lock(a, &name##_lock); \
+ }
+
+#define DEFINE_BRLOCK(name) \
+ spinlock_t name##_lock
+#endif
+
+#endif
next prev parent reply other threads:[~2009-10-15 6:05 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2009-10-15 4:40 [patch 0/6] vfsmount scaling and other bits npiggin
2009-10-15 4:40 ` [patch 1/6] fs: invalidate sb->s_bdev on remount,ro npiggin
2009-10-15 4:40 ` [patch 2/6] fs: no games with DCACHE_UNHASHED npiggin
2009-10-15 6:31 ` David Miller
2009-10-15 7:44 ` Eric Dumazet
2009-10-15 8:13 ` Nick Piggin
2009-10-15 8:29 ` Nick Piggin
2009-10-15 9:13 ` Eric Dumazet
2009-10-15 13:20 ` Matthew Wilcox
2009-10-15 14:41 ` Nick Piggin
2009-10-15 4:40 ` [patch 3/6] fs: dcache remove d_mounted npiggin
2009-10-15 10:37 ` Ian Kent
2009-10-15 4:40 ` npiggin [this message]
2009-10-15 6:58 ` [rfc][patch 4a/6] brlock: "fast" brlocks Nick Piggin
2009-10-15 11:05 ` Peter Zijlstra
2009-10-15 11:26 ` Nick Piggin
2009-10-19 5:25 ` [patch 4/6] brlock: introduce special brlocks Andrew Morton
2009-10-19 9:49 ` Nick Piggin
2009-10-19 12:24 ` Andrew Morton
2009-10-19 12:48 ` Nick Piggin
2009-10-15 4:40 ` [patch 5/6] fs: brlock vfsmount_lock npiggin
2009-10-15 4:40 ` [patch 6/6] fs: scale mntget/mntput npiggin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20091015050048.777261867@suse.de \
--to=npiggin@suse.de \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=raven@themaw.net \
--cc=torvalds@linux-foundation.org \
--cc=viro@zeniv.linux.org.uk \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox