public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [patch 0/5] seqlock consolidation
@ 2012-03-15 11:44 Thomas Gleixner
  2012-03-15 11:44 ` [patch 1/5] seqlock: Remove unused functions Thomas Gleixner
                   ` (5 more replies)
  0 siblings, 6 replies; 17+ messages in thread
From: Thomas Gleixner @ 2012-03-15 11:44 UTC (permalink / raw)
  To: LKML; +Cc: Al Viro, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

This series consists of two parts:

     1) Consolidate seqlocks and seqcount instead of having two
     	implementations for the counter functionality

     2) Extend the seqlock API and convert the open coded seqlocks
     	in fs_struct and dentry to it.

The main motivation of this change is to get rid of open coded
seqlocks as they are difficult to handle in the real time patch,
because the retry loops can cause livelocks there when the writer
side gets preempted. RT needs to take the lock so the writer gets
boosted and out of the way. With open coded seqlocks consisting of a
spinlock and a seqcount we have no idea which lock to acquire.

Aside from that, replacing open coded constructs with proper functions is
a worthwhile cleanup by itself.

Thanks,

	tglx


^ permalink raw reply	[flat|nested] 17+ messages in thread

* [patch 1/5] seqlock: Remove unused functions
  2012-03-15 11:44 [patch 0/5] seqlock consolidation Thomas Gleixner
@ 2012-03-15 11:44 ` Thomas Gleixner
  2012-03-15 16:29   ` Linus Torvalds
  2012-03-15 11:44 ` [patch 3/5] seqlock: Provide seq_spin_* functions Thomas Gleixner
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 17+ messages in thread
From: Thomas Gleixner @ 2012-03-15 11:44 UTC (permalink / raw)
  To: LKML; +Cc: Al Viro, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

[-- Attachment #1: seqlock-remove-unused-functions.patch --]
[-- Type: text/plain, Size: 1180 bytes --]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/seqlock.h |   21 ---------------------
 1 file changed, 21 deletions(-)

Index: tip/include/linux/seqlock.h
===================================================================
--- tip.orig/include/linux/seqlock.h
+++ tip/include/linux/seqlock.h
@@ -69,17 +69,6 @@ static inline void write_sequnlock(seqlo
 	spin_unlock(&sl->lock);
 }
 
-static inline int write_tryseqlock(seqlock_t *sl)
-{
-	int ret = spin_trylock(&sl->lock);
-
-	if (ret) {
-		++sl->sequence;
-		smp_wmb();
-	}
-	return ret;
-}
-
 /* Start of read calculation -- fetch last complete writer token */
 static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
 {
@@ -248,14 +237,4 @@ static inline void write_seqcount_barrie
 #define write_sequnlock_bh(lock)					\
 	do { write_sequnlock(lock); local_bh_enable(); } while(0)
 
-#define read_seqbegin_irqsave(lock, flags)				\
-	({ local_irq_save(flags);   read_seqbegin(lock); })
-
-#define read_seqretry_irqrestore(lock, iv, flags)			\
-	({								\
-		int ret = read_seqretry(lock, iv);			\
-		local_irq_restore(flags);				\
-		ret;							\
-	})
-
 #endif /* __LINUX_SEQLOCK_H */



^ permalink raw reply	[flat|nested] 17+ messages in thread

* [patch 2/5] seqlock: Use seqcount for seqlock
  2012-03-15 11:44 [patch 0/5] seqlock consolidation Thomas Gleixner
  2012-03-15 11:44 ` [patch 1/5] seqlock: Remove unused functions Thomas Gleixner
  2012-03-15 11:44 ` [patch 3/5] seqlock: Provide seq_spin_* functions Thomas Gleixner
@ 2012-03-15 11:44 ` Thomas Gleixner
  2012-03-15 11:44 ` [patch 4/5] fs: fs_struct use seqlock Thomas Gleixner
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 17+ messages in thread
From: Thomas Gleixner @ 2012-03-15 11:44 UTC (permalink / raw)
  To: LKML; +Cc: Al Viro, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

[-- Attachment #1: seqlock-use-seqcount.patch --]
[-- Type: text/plain, Size: 5468 bytes --]

No point in having different implementations for the same thing.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/seqlock.h |  176 +++++++++++++++++++++++++-----------------------
 1 file changed, 93 insertions(+), 83 deletions(-)

Index: tip/include/linux/seqlock.h
===================================================================
--- tip.orig/include/linux/seqlock.h
+++ tip/include/linux/seqlock.h
@@ -30,81 +30,12 @@
 #include <linux/preempt.h>
 #include <asm/processor.h>
 
-typedef struct {
-	unsigned sequence;
-	spinlock_t lock;
-} seqlock_t;
-
-/*
- * These macros triggered gcc-3.x compile-time problems.  We think these are
- * OK now.  Be cautious.
- */
-#define __SEQLOCK_UNLOCKED(lockname) \
-		 { 0, __SPIN_LOCK_UNLOCKED(lockname) }
-
-#define seqlock_init(x)					\
-	do {						\
-		(x)->sequence = 0;			\
-		spin_lock_init(&(x)->lock);		\
-	} while (0)
-
-#define DEFINE_SEQLOCK(x) \
-		seqlock_t x = __SEQLOCK_UNLOCKED(x)
-
-/* Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
- */
-static inline void write_seqlock(seqlock_t *sl)
-{
-	spin_lock(&sl->lock);
-	++sl->sequence;
-	smp_wmb();
-}
-
-static inline void write_sequnlock(seqlock_t *sl)
-{
-	smp_wmb();
-	sl->sequence++;
-	spin_unlock(&sl->lock);
-}
-
-/* Start of read calculation -- fetch last complete writer token */
-static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
-{
-	unsigned ret;
-
-repeat:
-	ret = ACCESS_ONCE(sl->sequence);
-	if (unlikely(ret & 1)) {
-		cpu_relax();
-		goto repeat;
-	}
-	smp_rmb();
-
-	return ret;
-}
-
-/*
- * Test if reader processed invalid data.
- *
- * If sequence value changed then writer changed data while in section.
- */
-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
-{
-	smp_rmb();
-
-	return unlikely(sl->sequence != start);
-}
-
-
 /*
  * Version using sequence counter only.
  * This can be used when code has its own mutex protecting the
  * updating starting before the write_seqcountbeqin() and ending
  * after the write_seqcount_end().
  */
-
 typedef struct seqcount {
 	unsigned sequence;
 } seqcount_t;
@@ -186,7 +117,6 @@ static inline int __read_seqcount_retry(
 static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
-
 	return __read_seqcount_retry(s, start);
 }
 
@@ -220,21 +150,101 @@ static inline void write_seqcount_barrie
 	s->sequence+=2;
 }
 
+typedef struct {
+	struct seqcount seqcount;
+	spinlock_t lock;
+} seqlock_t;
+
 /*
- * Possible sw/hw IRQ protected versions of the interfaces.
+ * These macros triggered gcc-3.x compile-time problems.  We think these are
+ * OK now.  Be cautious.
  */
+#define __SEQLOCK_UNLOCKED(lockname)			\
+	{						\
+		.seqcount = SEQCNT_ZERO,		\
+		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
+	}
+
+#define seqlock_init(x)					\
+	do {						\
+		seqcount_init(&(x)->seqcount);		\
+		spin_lock_init(&(x)->lock);		\
+	} while (0)
+
+#define DEFINE_SEQLOCK(x) \
+		seqlock_t x = __SEQLOCK_UNLOCKED(x)
+
+/*
+ * Read side functions for starting and finalizing a read side section.
+ */
+static inline unsigned read_seqbegin(const seqlock_t *sl)
+{
+	return read_seqcount_begin(&sl->seqcount);
+}
+
+static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+{
+	return read_seqcount_retry(&sl->seqcount, start);
+}
+
+/*
+ * Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
+ */
+static inline void write_seqlock(seqlock_t *sl)
+{
+	spin_lock(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock(seqlock_t *sl)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock(&sl->lock);
+}
+
+static inline void write_seqlock_bh(seqlock_t *sl)
+{
+	spin_lock_bh(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_bh(seqlock_t *sl)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock_bh(&sl->lock);
+}
+
+static inline void write_seqlock_irq(seqlock_t *sl)
+{
+	spin_lock_irq(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_irq(seqlock_t *sl)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sl->lock, flags);
+	write_seqcount_begin(&sl->seqcount);
+	return flags;
+}
+
 #define write_seqlock_irqsave(lock, flags)				\
-	do { local_irq_save(flags); write_seqlock(lock); } while (0)
-#define write_seqlock_irq(lock)						\
-	do { local_irq_disable();   write_seqlock(lock); } while (0)
-#define write_seqlock_bh(lock)						\
-        do { local_bh_disable();    write_seqlock(lock); } while (0)
-
-#define write_sequnlock_irqrestore(lock, flags)				\
-	do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
-#define write_sequnlock_irq(lock)					\
-	do { write_sequnlock(lock); local_irq_enable(); } while(0)
-#define write_sequnlock_bh(lock)					\
-	do { write_sequnlock(lock); local_bh_enable(); } while(0)
+	do { flags = __write_seqlock_irqsave(lock); } while (0)
+
+static inline void
+write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock_irqrestore(&sl->lock, flags);
+}
 
 #endif /* __LINUX_SEQLOCK_H */



^ permalink raw reply	[flat|nested] 17+ messages in thread

* [patch 3/5] seqlock: Provide seq_spin_* functions
  2012-03-15 11:44 [patch 0/5] seqlock consolidation Thomas Gleixner
  2012-03-15 11:44 ` [patch 1/5] seqlock: Remove unused functions Thomas Gleixner
@ 2012-03-15 11:44 ` Thomas Gleixner
  2012-03-15 11:44 ` [patch 2/5] seqlock: Use seqcount for seqlock Thomas Gleixner
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 17+ messages in thread
From: Thomas Gleixner @ 2012-03-15 11:44 UTC (permalink / raw)
  To: LKML; +Cc: Al Viro, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

[-- Attachment #1: seqlock-provide-seq-spin-lock.patch --]
[-- Type: text/plain, Size: 2410 bytes --]

In some cases it's desirable to lock the seqlock w/o changing the
seqcount. Provide functions for this, so we can avoid open coded
constructs.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/seqlock.h |   66 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)

Index: tip/include/linux/seqlock.h
===================================================================
--- tip.orig/include/linux/seqlock.h
+++ tip/include/linux/seqlock.h
@@ -176,6 +176,21 @@ typedef struct {
 
 /*
  * Read side functions for starting and finalizing a read side section.
+ * w/o barriers
+ */
+static inline unsigned __read_seqbegin(const seqlock_t *sl)
+{
+	return __read_seqcount_begin(&sl->seqcount);
+}
+
+static inline unsigned __read_seqretry(const seqlock_t *sl, unsigned start)
+{
+	return __read_seqcount_retry(&sl->seqcount, start);
+}
+
+/*
+ * Read side functions for starting and finalizing a read side section.
+ * with barriers
  */
 static inline unsigned read_seqbegin(const seqlock_t *sl)
 {
@@ -247,4 +262,55 @@ write_sequnlock_irqrestore(seqlock_t *sl
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 
+/*
+ * Instead of open coding a spinlock and a seqcount, the following
+ * functions allow to serialize on the seqlock w/o touching seqcount.
+ */
+static inline void seq_spin_lock(seqlock_t *sl)
+{
+	spin_lock(&sl->lock);
+}
+
+static inline int seq_spin_trylock(seqlock_t *sl)
+{
+	return spin_trylock(&sl->lock);
+}
+
+static inline void seq_spin_unlock(seqlock_t *sl)
+{
+	spin_unlock(&sl->lock);
+}
+
+static inline void assert_seq_spin_locked(seqlock_t *sl)
+{
+	lockdep_assert_held(&sl->lock);
+}
+
+static inline void seq_spin_lock_nested(seqlock_t *sl, int subclass)
+{
+	spin_lock_nested(&sl->lock, subclass);
+}
+
+/*
+ * For writers which need to take/release the lock w/o updating seqcount for
+ * whatever reasons the following functions allow to update the count
+ * after the lock has been acquired or before it is released.
+ */
+static inline void write_seqlock_begin(seqlock_t *sl)
+{
+	lockdep_assert_held(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_seqlock_end(seqlock_t *sl)
+{
+	lockdep_assert_held(&sl->lock);
+	write_seqcount_end(&sl->seqcount);
+}
+
+static inline void write_seqlock_barrier(seqlock_t *sl)
+{
+	write_seqcount_barrier(&sl->seqcount);
+}
+
 #endif /* __LINUX_SEQLOCK_H */



^ permalink raw reply	[flat|nested] 17+ messages in thread

* [patch 4/5] fs: fs_struct use seqlock
  2012-03-15 11:44 [patch 0/5] seqlock consolidation Thomas Gleixner
                   ` (2 preceding siblings ...)
  2012-03-15 11:44 ` [patch 2/5] seqlock: Use seqcount for seqlock Thomas Gleixner
@ 2012-03-15 11:44 ` Thomas Gleixner
  2012-03-15 11:44 ` [patch 5/5] fs: Use seqlock in struct dentry Thomas Gleixner
  2012-03-15 12:21 ` [patch 0/5] seqlock consolidation Al Viro
  5 siblings, 0 replies; 17+ messages in thread
From: Thomas Gleixner @ 2012-03-15 11:44 UTC (permalink / raw)
  To: LKML; +Cc: Al Viro, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

[-- Attachment #1: fs-struct-use-seqlock.patch --]
[-- Type: text/plain, Size: 8706 bytes --]

Replace the open coded seqlock with a real one.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

---
 fs/exec.c                 |    4 ++--
 fs/fhandle.c              |    4 ++--
 fs/fs_struct.c            |   46 ++++++++++++++++++----------------------------
 fs/namei.c                |   14 +++++++-------
 include/linux/fs_struct.h |   16 +++++++---------
 kernel/fork.c             |   10 +++++-----
 6 files changed, 41 insertions(+), 53 deletions(-)

Index: tip/fs/exec.c
===================================================================
--- tip.orig/fs/exec.c
+++ tip/fs/exec.c
@@ -1244,7 +1244,7 @@ static int check_unsafe_exec(struct linu
 	}
 
 	n_fs = 1;
-	spin_lock(&p->fs->lock);
+	seq_spin_lock(&p->fs->lock);
 	rcu_read_lock();
 	for (t = next_thread(p); t != p; t = next_thread(t)) {
 		if (t->fs == p->fs)
@@ -1261,7 +1261,7 @@ static int check_unsafe_exec(struct linu
 			res = 1;
 		}
 	}
-	spin_unlock(&p->fs->lock);
+	seq_spin_unlock(&p->fs->lock);
 
 	return res;
 }
Index: tip/fs/fhandle.c
===================================================================
--- tip.orig/fs/fhandle.c
+++ tip/fs/fhandle.c
@@ -117,10 +117,10 @@ static struct vfsmount *get_vfsmount_fro
 
 	if (fd == AT_FDCWD) {
 		struct fs_struct *fs = current->fs;
-		spin_lock(&fs->lock);
+		seq_spin_lock(&fs->lock);
 		path = fs->pwd;
 		mntget(path.mnt);
-		spin_unlock(&fs->lock);
+		seq_spin_unlock(&fs->lock);
 	} else {
 		int fput_needed;
 		struct file *file = fget_light(fd, &fput_needed);
Index: tip/fs/fs_struct.c
===================================================================
--- tip.orig/fs/fs_struct.c
+++ tip/fs/fs_struct.c
@@ -26,13 +26,11 @@ void set_fs_root(struct fs_struct *fs, s
 {
 	struct path old_root;
 
-	spin_lock(&fs->lock);
-	write_seqcount_begin(&fs->seq);
+	write_seqlock(&fs->lock);
 	old_root = fs->root;
 	fs->root = *path;
 	path_get_longterm(path);
-	write_seqcount_end(&fs->seq);
-	spin_unlock(&fs->lock);
+	write_sequnlock(&fs->lock);
 	if (old_root.dentry)
 		path_put_longterm(&old_root);
 }
@@ -45,13 +43,11 @@ void set_fs_pwd(struct fs_struct *fs, st
 {
 	struct path old_pwd;
 
-	spin_lock(&fs->lock);
-	write_seqcount_begin(&fs->seq);
+	write_seqlock(&fs->lock);
 	old_pwd = fs->pwd;
 	fs->pwd = *path;
 	path_get_longterm(path);
-	write_seqcount_end(&fs->seq);
-	spin_unlock(&fs->lock);
+	write_sequnlock(&fs->lock);
 
 	if (old_pwd.dentry)
 		path_put_longterm(&old_pwd);
@@ -68,8 +64,7 @@ void chroot_fs_refs(struct path *old_roo
 		task_lock(p);
 		fs = p->fs;
 		if (fs) {
-			spin_lock(&fs->lock);
-			write_seqcount_begin(&fs->seq);
+			write_seqlock(&fs->lock);
 			if (fs->root.dentry == old_root->dentry
 			    && fs->root.mnt == old_root->mnt) {
 				path_get_longterm(new_root);
@@ -82,8 +77,7 @@ void chroot_fs_refs(struct path *old_roo
 				fs->pwd = *new_root;
 				count++;
 			}
-			write_seqcount_end(&fs->seq);
-			spin_unlock(&fs->lock);
+			write_sequnlock(&fs->lock);
 		}
 		task_unlock(p);
 	} while_each_thread(g, p);
@@ -106,12 +100,10 @@ void exit_fs(struct task_struct *tsk)
 	if (fs) {
 		int kill;
 		task_lock(tsk);
-		spin_lock(&fs->lock);
-		write_seqcount_begin(&fs->seq);
+		write_seqlock(&fs->lock);
 		tsk->fs = NULL;
 		kill = !--fs->users;
-		write_seqcount_end(&fs->seq);
-		spin_unlock(&fs->lock);
+		write_sequnlock(&fs->lock);
 		task_unlock(tsk);
 		if (kill)
 			free_fs_struct(fs);
@@ -125,16 +117,15 @@ struct fs_struct *copy_fs_struct(struct 
 	if (fs) {
 		fs->users = 1;
 		fs->in_exec = 0;
-		spin_lock_init(&fs->lock);
-		seqcount_init(&fs->seq);
+		seqlock_init(&fs->lock);
 		fs->umask = old->umask;
 
-		spin_lock(&old->lock);
+		seq_spin_lock(&old->lock);
 		fs->root = old->root;
 		path_get_longterm(&fs->root);
 		fs->pwd = old->pwd;
 		path_get_longterm(&fs->pwd);
-		spin_unlock(&old->lock);
+		seq_spin_unlock(&old->lock);
 	}
 	return fs;
 }
@@ -149,10 +140,10 @@ int unshare_fs_struct(void)
 		return -ENOMEM;
 
 	task_lock(current);
-	spin_lock(&fs->lock);
+	seq_spin_lock(&fs->lock);
 	kill = !--fs->users;
 	current->fs = new_fs;
-	spin_unlock(&fs->lock);
+	seq_spin_unlock(&fs->lock);
 	task_unlock(current);
 
 	if (kill)
@@ -171,8 +162,7 @@ EXPORT_SYMBOL(current_umask);
 /* to be mentioned only in INIT_TASK */
 struct fs_struct init_fs = {
 	.users		= 1,
-	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
-	.seq		= SEQCNT_ZERO,
+	.lock		= __SEQLOCK_UNLOCKED(init_fs.lock),
 	.umask		= 0022,
 };
 
@@ -185,14 +175,14 @@ void daemonize_fs_struct(void)
 
 		task_lock(current);
 
-		spin_lock(&init_fs.lock);
+		seq_spin_lock(&init_fs.lock);
 		init_fs.users++;
-		spin_unlock(&init_fs.lock);
+		seq_spin_unlock(&init_fs.lock);
 
-		spin_lock(&fs->lock);
+		seq_spin_lock(&fs->lock);
 		current->fs = &init_fs;
 		kill = !--fs->users;
-		spin_unlock(&fs->lock);
+		seq_spin_unlock(&fs->lock);
 
 		task_unlock(current);
 		if (kill)
Index: tip/fs/namei.c
===================================================================
--- tip.orig/fs/namei.c
+++ tip/fs/namei.c
@@ -427,7 +427,7 @@ static int unlazy_walk(struct nameidata 
 	BUG_ON(!(nd->flags & LOOKUP_RCU));
 	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
 		want_root = 1;
-		spin_lock(&fs->lock);
+		seq_spin_lock(&fs->lock);
 		if (nd->root.mnt != fs->root.mnt ||
 				nd->root.dentry != fs->root.dentry)
 			goto err_root;
@@ -457,7 +457,7 @@ static int unlazy_walk(struct nameidata 
 	spin_unlock(&parent->d_lock);
 	if (want_root) {
 		path_get(&nd->root);
-		spin_unlock(&fs->lock);
+		seq_spin_unlock(&fs->lock);
 	}
 	mntget(nd->path.mnt);
 
@@ -472,7 +472,7 @@ err_parent:
 	spin_unlock(&parent->d_lock);
 err_root:
 	if (want_root)
-		spin_unlock(&fs->lock);
+		seq_spin_unlock(&fs->lock);
 	return -ECHILD;
 }
 
@@ -566,10 +566,10 @@ static __always_inline void set_root_rcu
 		unsigned seq;
 
 		do {
-			seq = read_seqcount_begin(&fs->seq);
+			seq = read_seqbegin(&fs->lock);
 			nd->root = fs->root;
 			nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
-		} while (read_seqcount_retry(&fs->seq, seq));
+		} while (read_seqretry(&fs->lock, seq));
 	}
 }
 
@@ -1545,10 +1545,10 @@ static int path_init(int dfd, const char
 			rcu_read_lock();
 
 			do {
-				seq = read_seqcount_begin(&fs->seq);
+				seq = read_seqbegin(&fs->lock);
 				nd->path = fs->pwd;
 				nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
-			} while (read_seqcount_retry(&fs->seq, seq));
+			} while (read_seqretry(&fs->lock, seq));
 		} else {
 			get_fs_pwd(current->fs, &nd->path);
 		}
Index: tip/include/linux/fs_struct.h
===================================================================
--- tip.orig/include/linux/fs_struct.h
+++ tip/include/linux/fs_struct.h
@@ -2,13 +2,11 @@
 #define _LINUX_FS_STRUCT_H
 
 #include <linux/path.h>
-#include <linux/spinlock.h>
 #include <linux/seqlock.h>
 
 struct fs_struct {
 	int users;
-	spinlock_t lock;
-	seqcount_t seq;
+	seqlock_t lock;
 	int umask;
 	int in_exec;
 	struct path root, pwd;
@@ -26,29 +24,29 @@ extern int unshare_fs_struct(void);
 
 static inline void get_fs_root(struct fs_struct *fs, struct path *root)
 {
-	spin_lock(&fs->lock);
+	seq_spin_lock(&fs->lock);
 	*root = fs->root;
 	path_get(root);
-	spin_unlock(&fs->lock);
+	seq_spin_unlock(&fs->lock);
 }
 
 static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
 {
-	spin_lock(&fs->lock);
+	seq_spin_lock(&fs->lock);
 	*pwd = fs->pwd;
 	path_get(pwd);
-	spin_unlock(&fs->lock);
+	seq_spin_unlock(&fs->lock);
 }
 
 static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
 				       struct path *pwd)
 {
-	spin_lock(&fs->lock);
+	seq_spin_lock(&fs->lock);
 	*root = fs->root;
 	path_get(root);
 	*pwd = fs->pwd;
 	path_get(pwd);
-	spin_unlock(&fs->lock);
+	seq_spin_unlock(&fs->lock);
 }
 
 #endif /* _LINUX_FS_STRUCT_H */
Index: tip/kernel/fork.c
===================================================================
--- tip.orig/kernel/fork.c
+++ tip/kernel/fork.c
@@ -876,13 +876,13 @@ static int copy_fs(unsigned long clone_f
 	struct fs_struct *fs = current->fs;
 	if (clone_flags & CLONE_FS) {
 		/* tsk->fs is already what we want */
-		spin_lock(&fs->lock);
+		seq_spin_lock(&fs->lock);
 		if (fs->in_exec) {
-			spin_unlock(&fs->lock);
+			seq_spin_unlock(&fs->lock);
 			return -EAGAIN;
 		}
 		fs->users++;
-		spin_unlock(&fs->lock);
+		seq_spin_unlock(&fs->lock);
 		return 0;
 	}
 	tsk->fs = copy_fs_struct(fs);
@@ -1757,13 +1757,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, 
 
 		if (new_fs) {
 			fs = current->fs;
-			spin_lock(&fs->lock);
+			seq_spin_lock(&fs->lock);
 			current->fs = new_fs;
 			if (--fs->users)
 				new_fs = NULL;
 			else
 				new_fs = fs;
-			spin_unlock(&fs->lock);
+			seq_spin_unlock(&fs->lock);
 		}
 
 		if (new_fd) {



^ permalink raw reply	[flat|nested] 17+ messages in thread

* [patch 5/5] fs: Use seqlock in struct dentry
  2012-03-15 11:44 [patch 0/5] seqlock consolidation Thomas Gleixner
                   ` (3 preceding siblings ...)
  2012-03-15 11:44 ` [patch 4/5] fs: fs_struct use seqlock Thomas Gleixner
@ 2012-03-15 11:44 ` Thomas Gleixner
  2012-03-15 12:21 ` [patch 0/5] seqlock consolidation Al Viro
  5 siblings, 0 replies; 17+ messages in thread
From: Thomas Gleixner @ 2012-03-15 11:44 UTC (permalink / raw)
  To: LKML; +Cc: Al Viro, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

[-- Attachment #1: fs-dentry-use-seqlock.patch --]
[-- Type: text/plain, Size: 93823 bytes --]

Replace the open coded seqlock with a real seqlock. This reorders
dentry members slightly to keep the structure size unchanged.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

---
 arch/powerpc/platforms/cell/spufs/inode.c |    6 
 drivers/infiniband/hw/ipath/ipath_fs.c    |    6 
 drivers/infiniband/hw/qib/qib_fs.c        |    6 
 drivers/usb/core/inode.c                  |   12 -
 fs/9p/vfs_dir.c                           |    4 
 fs/afs/dir.c                              |    4 
 fs/autofs4/autofs_i.h                     |   24 +-
 fs/autofs4/expire.c                       |   48 ++---
 fs/autofs4/root.c                         |   38 ++--
 fs/btrfs/export.c                         |    4 
 fs/btrfs/inode.c                          |    4 
 fs/ceph/caps.c                            |    8 
 fs/ceph/debugfs.c                         |    8 
 fs/ceph/dir.c                             |   30 +--
 fs/ceph/export.c                          |    4 
 fs/ceph/inode.c                           |   20 +-
 fs/ceph/mds_client.c                      |   18 +-
 fs/cifs/dir.c                             |    6 
 fs/coda/cache.c                           |    4 
 fs/configfs/configfs_internal.h           |    4 
 fs/configfs/inode.c                       |    6 
 fs/dcache.c                               |  253 ++++++++++++++----------------
 fs/dcookies.c                             |    8 
 fs/exportfs/expfs.c                       |   12 -
 fs/fat/inode.c                            |    4 
 fs/fat/namei_vfat.c                       |    4 
 fs/fs-writeback.c                         |    4 
 fs/fuse/inode.c                           |    4 
 fs/gfs2/export.c                          |    4 
 fs/isofs/export.c                         |    4 
 fs/libfs.c                                |   36 ++--
 fs/namei.c                                |   42 ++--
 fs/namespace.c                            |    8 
 fs/ncpfs/dir.c                            |    6 
 fs/ncpfs/ncplib_kernel.h                  |    8 
 fs/nfs/dir.c                              |    6 
 fs/nfs/getroot.c                          |   12 -
 fs/nfs/namespace.c                        |   16 -
 fs/nfs/unlink.c                           |   20 +-
 fs/nilfs2/namei.c                         |    4 
 fs/notify/fsnotify.c                      |    8 
 fs/notify/vfsmount_mark.c                 |   24 +-
 fs/ocfs2/dcache.c                         |    6 
 fs/ocfs2/export.c                         |    4 
 fs/reiserfs/inode.c                       |    4 
 fs/udf/namei.c                            |    4 
 fs/xfs/xfs_export.c                       |    8 
 include/linux/dcache.h                    |   17 --
 include/linux/fs.h                        |    4 
 include/linux/fsnotify_backend.h          |    6 
 kernel/cgroup.c                           |   22 +-
 net/sunrpc/rpc_pipe.c                     |    6 
 security/selinux/selinuxfs.c              |   14 -
 53 files changed, 421 insertions(+), 425 deletions(-)

Index: tip/arch/powerpc/platforms/cell/spufs/inode.c
===================================================================
--- tip.orig/arch/powerpc/platforms/cell/spufs/inode.c
+++ tip/arch/powerpc/platforms/cell/spufs/inode.c
@@ -164,18 +164,18 @@ static void spufs_prune_dir(struct dentr
 
 	mutex_lock(&dir->d_inode->i_mutex);
 	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		if (!(d_unhashed(dentry)) && dentry->d_inode) {
 			dget_dlock(dentry);
 			__d_drop(dentry);
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			simple_unlink(dir->d_inode, dentry);
 			/* XXX: what was dcache_lock protecting here? Other
 			 * filesystems (IB, configfs) release dcache_lock
 			 * before unlink */
 			dput(dentry);
 		} else {
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 		}
 	}
 	shrink_dcache_parent(dir);
Index: tip/drivers/infiniband/hw/ipath/ipath_fs.c
===================================================================
--- tip.orig/drivers/infiniband/hw/ipath/ipath_fs.c
+++ tip/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -277,14 +277,14 @@ static int remove_file(struct dentry *pa
 		goto bail;
 	}
 
-	spin_lock(&tmp->d_lock);
+	seq_spin_lock(&tmp->d_lock);
 	if (!(d_unhashed(tmp) && tmp->d_inode)) {
 		dget_dlock(tmp);
 		__d_drop(tmp);
-		spin_unlock(&tmp->d_lock);
+		seq_spin_unlock(&tmp->d_lock);
 		simple_unlink(parent->d_inode, tmp);
 	} else
-		spin_unlock(&tmp->d_lock);
+		seq_spin_unlock(&tmp->d_lock);
 
 	ret = 0;
 bail:
Index: tip/drivers/infiniband/hw/qib/qib_fs.c
===================================================================
--- tip.orig/drivers/infiniband/hw/qib/qib_fs.c
+++ tip/drivers/infiniband/hw/qib/qib_fs.c
@@ -453,14 +453,14 @@ static int remove_file(struct dentry *pa
 		goto bail;
 	}
 
-	spin_lock(&tmp->d_lock);
+	seq_spin_lock(&tmp->d_lock);
 	if (!(d_unhashed(tmp) && tmp->d_inode)) {
 		dget_dlock(tmp);
 		__d_drop(tmp);
-		spin_unlock(&tmp->d_lock);
+		seq_spin_unlock(&tmp->d_lock);
 		simple_unlink(parent->d_inode, tmp);
 	} else {
-		spin_unlock(&tmp->d_lock);
+		seq_spin_unlock(&tmp->d_lock);
 	}
 
 	ret = 0;
Index: tip/drivers/usb/core/inode.c
===================================================================
--- tip.orig/drivers/usb/core/inode.c
+++ tip/drivers/usb/core/inode.c
@@ -341,19 +341,19 @@ static int usbfs_empty (struct dentry *d
 {
 	struct list_head *list;
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	list_for_each(list, &dentry->d_subdirs) {
 		struct dentry *de = list_entry(list, struct dentry, d_u.d_child);
 
-		spin_lock_nested(&de->d_lock, DENTRY_D_LOCK_NESTED);
+		seq_spin_lock_nested(&de->d_lock, DENTRY_D_LOCK_NESTED);
 		if (usbfs_positive(de)) {
-			spin_unlock(&de->d_lock);
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&de->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			return 0;
 		}
-		spin_unlock(&de->d_lock);
+		seq_spin_unlock(&de->d_lock);
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	return 1;
 }
 
Index: tip/fs/9p/vfs_dir.c
===================================================================
--- tip.orig/fs/9p/vfs_dir.c
+++ tip/fs/9p/vfs_dir.c
@@ -107,7 +107,7 @@ static int v9fs_alloc_rdir_buf(struct fi
 			err = -ENOMEM;
 			goto exit;
 		}
-		spin_lock(&filp->f_dentry->d_lock);
+		seq_spin_lock(&filp->f_dentry->d_lock);
 		if (!fid->rdir) {
 			rdir->buf = (uint8_t *)rdir + sizeof(struct p9_rdir);
 			mutex_init(&rdir->mutex);
@@ -115,7 +115,7 @@ static int v9fs_alloc_rdir_buf(struct fi
 			fid->rdir = (void *) rdir;
 			rdir = NULL;
 		}
-		spin_unlock(&filp->f_dentry->d_lock);
+		seq_spin_unlock(&filp->f_dentry->d_lock);
 		kfree(rdir);
 	}
 exit:
Index: tip/fs/afs/dir.c
===================================================================
--- tip.orig/fs/afs/dir.c
+++ tip/fs/afs/dir.c
@@ -705,9 +705,9 @@ out_skip:
 
 	/* the dirent, if it exists, now points to a different vnode */
 not_found:
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	dentry->d_flags |= DCACHE_NFSFS_RENAMED;
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 
 out_bad:
 	if (dentry->d_inode) {
Index: tip/fs/autofs4/autofs_i.h
===================================================================
--- tip.orig/fs/autofs4/autofs_i.h
+++ tip/fs/autofs4/autofs_i.h
@@ -199,9 +199,9 @@ static inline void __managed_dentry_set_
 
 static inline void managed_dentry_set_automount(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	__managed_dentry_set_automount(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 static inline void __managed_dentry_clear_automount(struct dentry *dentry)
@@ -211,9 +211,9 @@ static inline void __managed_dentry_clea
 
 static inline void managed_dentry_clear_automount(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	__managed_dentry_clear_automount(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 static inline void __managed_dentry_set_transit(struct dentry *dentry)
@@ -223,9 +223,9 @@ static inline void __managed_dentry_set_
 
 static inline void managed_dentry_set_transit(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	__managed_dentry_set_transit(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 static inline void __managed_dentry_clear_transit(struct dentry *dentry)
@@ -235,9 +235,9 @@ static inline void __managed_dentry_clea
 
 static inline void managed_dentry_clear_transit(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	__managed_dentry_clear_transit(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 static inline void __managed_dentry_set_managed(struct dentry *dentry)
@@ -247,9 +247,9 @@ static inline void __managed_dentry_set_
 
 static inline void managed_dentry_set_managed(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	__managed_dentry_set_managed(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 static inline void __managed_dentry_clear_managed(struct dentry *dentry)
@@ -259,9 +259,9 @@ static inline void __managed_dentry_clea
 
 static inline void managed_dentry_clear_managed(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	__managed_dentry_clear_managed(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 /* Initializing function */
Index: tip/fs/autofs4/expire.c
===================================================================
--- tip.orig/fs/autofs4/expire.c
+++ tip/fs/autofs4/expire.c
@@ -99,7 +99,7 @@ static struct dentry *get_next_positive_
 	spin_lock(&sbi->lookup_lock);
 
 	if (prev == NULL) {
-		spin_lock(&root->d_lock);
+		seq_spin_lock(&root->d_lock);
 		prev = dget_dlock(root);
 		next = prev->d_subdirs.next;
 		p = prev;
@@ -107,12 +107,12 @@ static struct dentry *get_next_positive_
 	}
 
 	p = prev;
-	spin_lock(&p->d_lock);
+	seq_spin_lock(&p->d_lock);
 again:
 	next = p->d_u.d_child.next;
 start:
 	if (next == &root->d_subdirs) {
-		spin_unlock(&p->d_lock);
+		seq_spin_unlock(&p->d_lock);
 		spin_unlock(&sbi->lookup_lock);
 		dput(prev);
 		return NULL;
@@ -120,17 +120,17 @@ start:
 
 	q = list_entry(next, struct dentry, d_u.d_child);
 
-	spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
+	seq_spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
 	/* Negative dentry - try next */
 	if (!simple_positive(q)) {
-		spin_unlock(&p->d_lock);
-		lock_set_subclass(&q->d_lock.dep_map, 0, _RET_IP_);
+		seq_spin_unlock(&p->d_lock);
+		lock_set_subclass(&q->d_lock.lock.dep_map, 0, _RET_IP_);
 		p = q;
 		goto again;
 	}
 	dget_dlock(q);
-	spin_unlock(&q->d_lock);
-	spin_unlock(&p->d_lock);
+	seq_spin_unlock(&q->d_lock);
+	seq_spin_unlock(&p->d_lock);
 	spin_unlock(&sbi->lookup_lock);
 
 	dput(prev);
@@ -154,7 +154,7 @@ static struct dentry *get_next_positive_
 	spin_lock(&sbi->lookup_lock);
 relock:
 	p = prev;
-	spin_lock(&p->d_lock);
+	seq_spin_lock(&p->d_lock);
 again:
 	next = p->d_subdirs.next;
 	if (next == &p->d_subdirs) {
@@ -162,19 +162,19 @@ again:
 			struct dentry *parent;
 
 			if (p == root) {
-				spin_unlock(&p->d_lock);
+				seq_spin_unlock(&p->d_lock);
 				spin_unlock(&sbi->lookup_lock);
 				dput(prev);
 				return NULL;
 			}
 
 			parent = p->d_parent;
-			if (!spin_trylock(&parent->d_lock)) {
-				spin_unlock(&p->d_lock);
+			if (!seq_spin_trylock(&parent->d_lock)) {
+				seq_spin_unlock(&p->d_lock);
 				cpu_relax();
 				goto relock;
 			}
-			spin_unlock(&p->d_lock);
+			seq_spin_unlock(&p->d_lock);
 			next = p->d_u.d_child.next;
 			p = parent;
 			if (next != &parent->d_subdirs)
@@ -183,17 +183,17 @@ again:
 	}
 	ret = list_entry(next, struct dentry, d_u.d_child);
 
-	spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
+	seq_spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
 	/* Negative dentry - try next */
 	if (!simple_positive(ret)) {
-		spin_unlock(&p->d_lock);
-		lock_set_subclass(&ret->d_lock.dep_map, 0, _RET_IP_);
+		seq_spin_unlock(&p->d_lock);
+		lock_set_subclass(&ret->d_lock.lock.dep_map, 0, _RET_IP_);
 		p = ret;
 		goto again;
 	}
 	dget_dlock(ret);
-	spin_unlock(&ret->d_lock);
-	spin_unlock(&p->d_lock);
+	seq_spin_unlock(&ret->d_lock);
+	seq_spin_unlock(&p->d_lock);
 	spin_unlock(&sbi->lookup_lock);
 
 	dput(prev);
@@ -464,11 +464,11 @@ found:
 	init_completion(&ino->expire_complete);
 	spin_unlock(&sbi->fs_lock);
 	spin_lock(&sbi->lookup_lock);
-	spin_lock(&expired->d_parent->d_lock);
-	spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
+	seq_spin_lock(&expired->d_parent->d_lock);
+	seq_spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
 	list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
-	spin_unlock(&expired->d_lock);
-	spin_unlock(&expired->d_parent->d_lock);
+	seq_spin_unlock(&expired->d_lock);
+	seq_spin_unlock(&expired->d_parent->d_lock);
 	spin_unlock(&sbi->lookup_lock);
 	return expired;
 }
@@ -558,7 +558,7 @@ int autofs4_do_expire_multi(struct super
 
 		spin_lock(&sbi->fs_lock);
 		ino->flags &= ~AUTOFS_INF_EXPIRING;
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		if (!ret) {
 			if ((IS_ROOT(dentry) ||
 			    (autofs_type_indirect(sbi->type) &&
@@ -566,7 +566,7 @@ int autofs4_do_expire_multi(struct super
 			    !(dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
 				__managed_dentry_set_automount(dentry);
 		}
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		complete_all(&ino->expire_complete);
 		spin_unlock(&sbi->fs_lock);
 		dput(dentry);
Index: tip/fs/autofs4/root.c
===================================================================
--- tip.orig/fs/autofs4/root.c
+++ tip/fs/autofs4/root.c
@@ -124,13 +124,13 @@ static int autofs4_dir_open(struct inode
 	 * it.
 	 */
 	spin_lock(&sbi->lookup_lock);
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		spin_unlock(&sbi->lookup_lock);
 		return -ENOENT;
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	spin_unlock(&sbi->lookup_lock);
 
 out:
@@ -179,7 +179,7 @@ static struct dentry *autofs4_lookup_act
 		ino = list_entry(p, struct autofs_info, active);
 		active = ino->dentry;
 
-		spin_lock(&active->d_lock);
+		seq_spin_lock(&active->d_lock);
 
 		/* Already gone? */
 		if (active->d_count == 0)
@@ -199,12 +199,12 @@ static struct dentry *autofs4_lookup_act
 
 		if (d_unhashed(active)) {
 			dget_dlock(active);
-			spin_unlock(&active->d_lock);
+			seq_spin_unlock(&active->d_lock);
 			spin_unlock(&sbi->lookup_lock);
 			return active;
 		}
 next:
-		spin_unlock(&active->d_lock);
+		seq_spin_unlock(&active->d_lock);
 	}
 	spin_unlock(&sbi->lookup_lock);
 
@@ -231,7 +231,7 @@ static struct dentry *autofs4_lookup_exp
 		ino = list_entry(p, struct autofs_info, expiring);
 		expiring = ino->dentry;
 
-		spin_lock(&expiring->d_lock);
+		seq_spin_lock(&expiring->d_lock);
 
 		/* Bad luck, we've already been dentry_iput */
 		if (!expiring->d_inode)
@@ -251,12 +251,12 @@ static struct dentry *autofs4_lookup_exp
 
 		if (d_unhashed(expiring)) {
 			dget_dlock(expiring);
-			spin_unlock(&expiring->d_lock);
+			seq_spin_unlock(&expiring->d_lock);
 			spin_unlock(&sbi->lookup_lock);
 			return expiring;
 		}
 next:
-		spin_unlock(&expiring->d_lock);
+		seq_spin_unlock(&expiring->d_lock);
 	}
 	spin_unlock(&sbi->lookup_lock);
 
@@ -382,12 +382,12 @@ static struct vfsmount *autofs4_d_automo
 			if (have_submounts(dentry))
 				goto done;
 		} else {
-			spin_lock(&dentry->d_lock);
+			seq_spin_lock(&dentry->d_lock);
 			if (!list_empty(&dentry->d_subdirs)) {
-				spin_unlock(&dentry->d_lock);
+				seq_spin_unlock(&dentry->d_lock);
 				goto done;
 			}
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 		}
 		ino->flags |= AUTOFS_INF_PENDING;
 		spin_unlock(&sbi->fs_lock);
@@ -410,12 +410,12 @@ done:
 		 * an actual mount so ->d_automount() won't be called during
 		 * the follow.
 		 */
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		if ((!d_mountpoint(dentry) &&
 		    !list_empty(&dentry->d_subdirs)) ||
 		    (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
 			__managed_dentry_clear_automount(dentry);
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 	}
 	spin_unlock(&sbi->fs_lock);
 
@@ -597,9 +597,9 @@ static int autofs4_dir_unlink(struct ino
 
 	spin_lock(&sbi->lookup_lock);
 	__autofs4_add_expiring(dentry);
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	__d_drop(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	spin_unlock(&sbi->lookup_lock);
 
 	return 0;
@@ -670,15 +670,15 @@ static int autofs4_dir_rmdir(struct inod
 		return -EACCES;
 
 	spin_lock(&sbi->lookup_lock);
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (!list_empty(&dentry->d_subdirs)) {
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		spin_unlock(&sbi->lookup_lock);
 		return -ENOTEMPTY;
 	}
 	__autofs4_add_expiring(dentry);
 	__d_drop(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	spin_unlock(&sbi->lookup_lock);
 
 	if (sbi->version < 5)
Index: tip/fs/btrfs/export.c
===================================================================
--- tip.orig/fs/btrfs/export.c
+++ tip/fs/btrfs/export.c
@@ -40,14 +40,14 @@ static int btrfs_encode_fh(struct dentry
 		struct inode *parent;
 		u64 parent_root_id;
 
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 
 		parent = dentry->d_parent->d_inode;
 		fid->parent_objectid = BTRFS_I(parent)->location.objectid;
 		fid->parent_gen = parent->i_generation;
 		parent_root_id = BTRFS_I(parent)->root->objectid;
 
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 
 		if (parent_root_id != fid->root_objectid) {
 			fid->parent_root_objectid = parent_root_id;
Index: tip/fs/btrfs/inode.c
===================================================================
--- tip.orig/fs/btrfs/inode.c
+++ tip/fs/btrfs/inode.c
@@ -4014,9 +4014,9 @@ static struct dentry *btrfs_lookup(struc
 
 	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
 	if (unlikely(d_need_lookup(dentry))) {
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 	}
 	return ret;
 }
Index: tip/fs/ceph/caps.c
===================================================================
--- tip.orig/fs/ceph/caps.c
+++ tip/fs/ceph/caps.c
@@ -3066,14 +3066,14 @@ int ceph_encode_dentry_release(void **p,
 	 * doesn't have to be perfect; the mds will revoke anything we don't
 	 * release.
 	 */
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (di->lease_session && di->lease_session->s_mds == mds)
 		force = 1;
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 
 	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
 		dout("encode_dentry_release %p mds%d seq %d\n",
 		     dentry, mds, (int)di->lease_seq);
@@ -3083,6 +3083,6 @@ int ceph_encode_dentry_release(void **p,
 		rel->dname_seq = cpu_to_le32(di->lease_seq);
 		__ceph_mdsc_drop_dentry_lease(dentry);
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	return ret;
 }
Index: tip/fs/ceph/debugfs.c
===================================================================
--- tip.orig/fs/ceph/debugfs.c
+++ tip/fs/ceph/debugfs.c
@@ -82,13 +82,13 @@ static int mdsc_show(struct seq_file *s,
 						    &pathbase, 0);
 			if (IS_ERR(path))
 				path = NULL;
-			spin_lock(&req->r_dentry->d_lock);
+			seq_spin_lock(&req->r_dentry->d_lock);
 			seq_printf(s, " #%llx/%.*s (%s)",
 				   ceph_ino(req->r_dentry->d_parent->d_inode),
 				   req->r_dentry->d_name.len,
 				   req->r_dentry->d_name.name,
 				   path ? path : "");
-			spin_unlock(&req->r_dentry->d_lock);
+			seq_spin_unlock(&req->r_dentry->d_lock);
 			kfree(path);
 		} else if (req->r_path1) {
 			seq_printf(s, " #%llx/%s", req->r_ino1.ino,
@@ -100,13 +100,13 @@ static int mdsc_show(struct seq_file *s,
 						    &pathbase, 0);
 			if (IS_ERR(path))
 				path = NULL;
-			spin_lock(&req->r_old_dentry->d_lock);
+			seq_spin_lock(&req->r_old_dentry->d_lock);
 			seq_printf(s, " #%llx/%.*s (%s)",
 			   ceph_ino(req->r_old_dentry_dir),
 				   req->r_old_dentry->d_name.len,
 				   req->r_old_dentry->d_name.name,
 				   path ? path : "");
-			spin_unlock(&req->r_old_dentry->d_lock);
+			seq_spin_unlock(&req->r_old_dentry->d_lock);
 			kfree(path);
 		} else if (req->r_path2) {
 			if (req->r_ino2.ino)
Index: tip/fs/ceph/dir.c
===================================================================
--- tip.orig/fs/ceph/dir.c
+++ tip/fs/ceph/dir.c
@@ -44,7 +44,7 @@ int ceph_init_dentry(struct dentry *dent
 	if (!di)
 		return -ENOMEM;          /* oh well */
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (dentry->d_fsdata) {
 		/* lost a race */
 		kmem_cache_free(ceph_dentry_cachep, di);
@@ -67,7 +67,7 @@ int ceph_init_dentry(struct dentry *dent
 	dentry->d_fsdata = di;
 	ceph_dentry_lru_add(dentry);
 out_unlock:
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	return 0;
 }
 
@@ -78,12 +78,12 @@ struct inode *ceph_get_dentry_parent_ino
 	if (!dentry)
 		return NULL;
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (dentry->d_parent) {
 		inode = dentry->d_parent->d_inode;
 		ihold(inode);
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	return inode;
 }
 
@@ -130,7 +130,7 @@ static int __dcache_readdir(struct file 
 	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
 	     last);
 
-	spin_lock(&parent->d_lock);
+	seq_spin_lock(&parent->d_lock);
 
 	/* start at beginning? */
 	if (filp->f_pos == 2 || last == NULL ||
@@ -154,7 +154,7 @@ more:
 			fi->flags |= CEPH_F_ATEND;
 			goto out_unlock;
 		}
-		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+		seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 		if (!d_unhashed(dentry) && dentry->d_inode &&
 		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
 		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
@@ -164,15 +164,15 @@ more:
 		     dentry->d_name.len, dentry->d_name.name, di->offset,
 		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
 		     !dentry->d_inode ? " null" : "");
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		p = p->prev;
 		dentry = list_entry(p, struct dentry, d_u.d_child);
 		di = ceph_dentry(dentry);
 	}
 
 	dget_dlock(dentry);
-	spin_unlock(&dentry->d_lock);
-	spin_unlock(&parent->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&parent->d_lock);
 
 	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
 	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
@@ -205,12 +205,12 @@ more:
 		goto out;
 	}
 
-	spin_lock(&parent->d_lock);
+	seq_spin_lock(&parent->d_lock);
 	p = p->prev;	/* advance to next dentry */
 	goto more;
 
 out_unlock:
-	spin_unlock(&parent->d_lock);
+	seq_spin_unlock(&parent->d_lock);
 out:
 	if (last)
 		dput(last);
@@ -950,10 +950,10 @@ static int ceph_rename(struct inode *old
  */
 void ceph_invalidate_dentry_lease(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	dentry->d_time = jiffies;
 	ceph_dentry(dentry)->lease_shared_gen = 0;
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 /*
@@ -971,7 +971,7 @@ static int dentry_lease_is_valid(struct 
 	struct inode *dir = NULL;
 	u32 seq = 0;
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	di = ceph_dentry(dentry);
 	if (di->lease_session) {
 		s = di->lease_session;
@@ -995,7 +995,7 @@ static int dentry_lease_is_valid(struct 
 			}
 		}
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 
 	if (session) {
 		ceph_mdsc_lease_send_msg(session, dir, dentry,
Index: tip/fs/ceph/export.c
===================================================================
--- tip.orig/fs/ceph/export.c
+++ tip/fs/ceph/export.c
@@ -55,7 +55,7 @@ static int ceph_encode_fh(struct dentry 
 	if (ceph_snap(inode) != CEPH_NOSNAP)
 		return -EINVAL;
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	parent = dentry->d_parent;
 	if (*max_len >= connected_handle_length) {
 		dout("encode_fh %p connectable\n", dentry);
@@ -79,7 +79,7 @@ static int ceph_encode_fh(struct dentry 
 		*max_len = handle_length;
 		type = 255;
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	return type;
 }
 
Index: tip/fs/ceph/inode.c
===================================================================
--- tip.orig/fs/ceph/inode.c
+++ tip/fs/ceph/inode.c
@@ -808,7 +808,7 @@ static void update_dentry_lease(struct d
 	if (dentry->d_op != &ceph_dentry_ops)
 		return;
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
 	     dentry, duration, ttl);
 
@@ -836,7 +836,7 @@ static void update_dentry_lease(struct d
 	di->lease_renew_from = 0;
 	dentry->d_time = ttl;
 out_unlock:
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	return;
 }
 
@@ -866,13 +866,13 @@ static void ceph_set_dentry_offset(struc
 	di->offset = ceph_inode(inode)->i_max_offset++;
 	spin_unlock(&ci->i_ceph_lock);
 
-	spin_lock(&dir->d_lock);
-	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
+	seq_spin_lock(&dir->d_lock);
+	seq_spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
 	list_move(&dn->d_u.d_child, &dir->d_subdirs);
 	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
 	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
-	spin_unlock(&dn->d_lock);
-	spin_unlock(&dir->d_lock);
+	seq_spin_unlock(&dn->d_lock);
+	seq_spin_unlock(&dir->d_lock);
 }
 
 /*
@@ -1254,11 +1254,11 @@ retry_lookup:
 			goto retry_lookup;
 		} else {
 			/* reorder parent's d_subdirs */
-			spin_lock(&parent->d_lock);
-			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
+			seq_spin_lock(&parent->d_lock);
+			seq_spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
 			list_move(&dn->d_u.d_child, &parent->d_subdirs);
-			spin_unlock(&dn->d_lock);
-			spin_unlock(&parent->d_lock);
+			seq_spin_unlock(&dn->d_lock);
+			seq_spin_unlock(&parent->d_lock);
 		}
 
 		di = dn->d_fsdata;
Index: tip/fs/ceph/mds_client.c
===================================================================
--- tip.orig/fs/ceph/mds_client.c
+++ tip/fs/ceph/mds_client.c
@@ -1492,25 +1492,25 @@ retry:
 	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
 		struct inode *inode;
 
-		spin_lock(&temp->d_lock);
+		seq_spin_lock(&temp->d_lock);
 		inode = temp->d_inode;
 		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
 			dout("build_path path+%d: %p SNAPDIR\n",
 			     pos, temp);
 		} else if (stop_on_nosnap && inode &&
 			   ceph_snap(inode) == CEPH_NOSNAP) {
-			spin_unlock(&temp->d_lock);
+			seq_spin_unlock(&temp->d_lock);
 			break;
 		} else {
 			pos -= temp->d_name.len;
 			if (pos < 0) {
-				spin_unlock(&temp->d_lock);
+				seq_spin_unlock(&temp->d_lock);
 				break;
 			}
 			strncpy(path + pos, temp->d_name.name,
 				temp->d_name.len);
 		}
-		spin_unlock(&temp->d_lock);
+		seq_spin_unlock(&temp->d_lock);
 		if (pos)
 			path[--pos] = '/';
 		temp = temp->d_parent;
@@ -2772,7 +2772,7 @@ static void handle_lease(struct ceph_mds
 	if (!dentry)
 		goto release;
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	di = ceph_dentry(dentry);
 	switch (h->action) {
 	case CEPH_MDS_LEASE_REVOKE:
@@ -2800,7 +2800,7 @@ static void handle_lease(struct ceph_mds
 		}
 		break;
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	dput(dentry);
 
 	if (!release)
@@ -2873,7 +2873,7 @@ void ceph_mdsc_lease_release(struct ceph
 	BUG_ON(dentry == NULL);
 
 	/* is dentry lease valid? */
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	di = ceph_dentry(dentry);
 	if (!di || !di->lease_session ||
 	    di->lease_session->s_mds < 0 ||
@@ -2882,7 +2882,7 @@ void ceph_mdsc_lease_release(struct ceph
 		dout("lease_release inode %p dentry %p -- "
 		     "no lease\n",
 		     inode, dentry);
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		return;
 	}
 
@@ -2890,7 +2890,7 @@ void ceph_mdsc_lease_release(struct ceph
 	session = ceph_get_mds_session(di->lease_session);
 	seq = di->lease_seq;
 	__ceph_mdsc_drop_dentry_lease(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 
 	dout("lease_release inode %p dentry %p to mds%d\n",
 	     inode, dentry, session->s_mds);
Index: tip/fs/cifs/dir.c
===================================================================
--- tip.orig/fs/cifs/dir.c
+++ tip/fs/cifs/dir.c
@@ -83,10 +83,10 @@ cifs_bp_rename_retry:
 	full_path[namelen] = 0;	/* trailing null */
 	rcu_read_lock();
 	for (temp = direntry; !IS_ROOT(temp);) {
-		spin_lock(&temp->d_lock);
+		seq_spin_lock(&temp->d_lock);
 		namelen -= 1 + temp->d_name.len;
 		if (namelen < 0) {
-			spin_unlock(&temp->d_lock);
+			seq_spin_unlock(&temp->d_lock);
 			break;
 		} else {
 			full_path[namelen] = dirsep;
@@ -94,7 +94,7 @@ cifs_bp_rename_retry:
 				temp->d_name.len);
 			cFYI(0, "name: %s", full_path + namelen);
 		}
-		spin_unlock(&temp->d_lock);
+		seq_spin_unlock(&temp->d_lock);
 		temp = temp->d_parent;
 		if (temp == NULL) {
 			cERROR(1, "corrupt dentry");
Index: tip/fs/coda/cache.c
===================================================================
--- tip.orig/fs/coda/cache.c
+++ tip/fs/coda/cache.c
@@ -92,7 +92,7 @@ static void coda_flag_children(struct de
 	struct list_head *child;
 	struct dentry *de;
 
-	spin_lock(&parent->d_lock);
+	seq_spin_lock(&parent->d_lock);
 	list_for_each(child, &parent->d_subdirs)
 	{
 		de = list_entry(child, struct dentry, d_u.d_child);
@@ -101,7 +101,7 @@ static void coda_flag_children(struct de
 			continue;
 		coda_flag_inode(de->d_inode, flag);
 	}
-	spin_unlock(&parent->d_lock);
+	seq_spin_unlock(&parent->d_lock);
 	return; 
 }
 
Index: tip/fs/configfs/configfs_internal.h
===================================================================
--- tip.orig/fs/configfs/configfs_internal.h
+++ tip/fs/configfs/configfs_internal.h
@@ -121,7 +121,7 @@ static inline struct config_item *config
 {
 	struct config_item * item = NULL;
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (!d_unhashed(dentry)) {
 		struct configfs_dirent * sd = dentry->d_fsdata;
 		if (sd->s_type & CONFIGFS_ITEM_LINK) {
@@ -130,7 +130,7 @@ static inline struct config_item *config
 		} else
 			item = config_item_get(sd->s_element);
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 
 	return item;
 }
Index: tip/fs/configfs/inode.c
===================================================================
--- tip.orig/fs/configfs/inode.c
+++ tip/fs/configfs/inode.c
@@ -251,14 +251,14 @@ void configfs_drop_dentry(struct configf
 	struct dentry * dentry = sd->s_dentry;
 
 	if (dentry) {
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		if (!(d_unhashed(dentry) && dentry->d_inode)) {
 			dget_dlock(dentry);
 			__d_drop(dentry);
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			simple_unlink(parent->d_inode, dentry);
 		} else
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 	}
 }
 
Index: tip/fs/dcache.c
===================================================================
--- tip.orig/fs/dcache.c
+++ tip/fs/dcache.c
@@ -193,9 +193,9 @@ static void d_free(struct dentry *dentry
  */
 static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
 {
-	assert_spin_locked(&dentry->d_lock);
+	assert_seq_spin_locked(&dentry->d_lock);
 	/* Go through a barrier */
-	write_seqcount_barrier(&dentry->d_seq);
+	write_seqlock_barrier(&dentry->d_lock);
 }
 
 /*
@@ -211,7 +211,7 @@ static void dentry_iput(struct dentry * 
 	if (inode) {
 		dentry->d_inode = NULL;
 		list_del_init(&dentry->d_alias);
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		spin_unlock(&inode->i_lock);
 		if (!inode->i_nlink)
 			fsnotify_inoderemove(inode);
@@ -220,7 +220,7 @@ static void dentry_iput(struct dentry * 
 		else
 			iput(inode);
 	} else {
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 	}
 }
 
@@ -236,7 +236,7 @@ static void dentry_unlink_inode(struct d
 	dentry->d_inode = NULL;
 	list_del_init(&dentry->d_alias);
 	dentry_rcuwalk_barrier(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	spin_unlock(&inode->i_lock);
 	if (!inode->i_nlink)
 		fsnotify_inoderemove(inode);
@@ -334,7 +334,7 @@ static struct dentry *d_kill(struct dent
 	 */
 	dentry->d_flags |= DCACHE_DISCONNECTED;
 	if (parent)
-		spin_unlock(&parent->d_lock);
+		seq_spin_unlock(&parent->d_lock);
 	dentry_iput(dentry);
 	/*
 	 * dentry_iput drops the locks, at which point nobody (except
@@ -391,9 +391,9 @@ EXPORT_SYMBOL(__d_drop);
 
 void d_drop(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	__d_drop(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 EXPORT_SYMBOL(d_drop);
 
@@ -408,10 +408,10 @@ EXPORT_SYMBOL(d_drop);
  */
 void d_clear_need_lookup(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	__d_drop(dentry);
 	dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 EXPORT_SYMBOL(d_clear_need_lookup);
 
@@ -430,7 +430,7 @@ static inline struct dentry *dentry_kill
 	inode = dentry->d_inode;
 	if (inode && !spin_trylock(&inode->i_lock)) {
 relock:
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		cpu_relax();
 		return dentry; /* try again with same dentry */
 	}
@@ -438,7 +438,7 @@ relock:
 		parent = NULL;
 	else
 		parent = dentry->d_parent;
-	if (parent && !spin_trylock(&parent->d_lock)) {
+	if (parent && !seq_spin_trylock(&parent->d_lock)) {
 		if (inode)
 			spin_unlock(&inode->i_lock);
 		goto relock;
@@ -491,11 +491,11 @@ void dput(struct dentry *dentry)
 repeat:
 	if (dentry->d_count == 1)
 		might_sleep();
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	BUG_ON(!dentry->d_count);
 	if (dentry->d_count > 1) {
 		dentry->d_count--;
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		return;
 	}
 
@@ -518,7 +518,7 @@ repeat:
 	dentry_lru_add(dentry);
 
 	dentry->d_count--;
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	return;
 
 kill_it:
@@ -545,9 +545,9 @@ int d_invalidate(struct dentry * dentry)
 	/*
 	 * If it's already been dropped, return OK.
 	 */
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (d_unhashed(dentry)) {
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		return 0;
 	}
 	/*
@@ -555,9 +555,9 @@ int d_invalidate(struct dentry * dentry)
 	 * to get rid of unused child entries.
 	 */
 	if (!list_empty(&dentry->d_subdirs)) {
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		shrink_dcache_parent(dentry);
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 	}
 
 	/*
@@ -574,13 +574,13 @@ int d_invalidate(struct dentry * dentry)
 	 */
 	if (dentry->d_count > 1 && dentry->d_inode) {
 		if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			return -EBUSY;
 		}
 	}
 
 	__d_drop(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	return 0;
 }
 EXPORT_SYMBOL(d_invalidate);
@@ -593,9 +593,9 @@ static inline void __dget_dlock(struct d
 
 static inline void __dget(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	__dget_dlock(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 struct dentry *dget_parent(struct dentry *dentry)
@@ -609,16 +609,16 @@ repeat:
 	 */
 	rcu_read_lock();
 	ret = dentry->d_parent;
-	spin_lock(&ret->d_lock);
+	seq_spin_lock(&ret->d_lock);
 	if (unlikely(ret != dentry->d_parent)) {
-		spin_unlock(&ret->d_lock);
+		seq_spin_unlock(&ret->d_lock);
 		rcu_read_unlock();
 		goto repeat;
 	}
 	rcu_read_unlock();
 	BUG_ON(!ret->d_count);
 	ret->d_count++;
-	spin_unlock(&ret->d_lock);
+	seq_spin_unlock(&ret->d_lock);
 	return ret;
 }
 EXPORT_SYMBOL(dget_parent);
@@ -646,31 +646,31 @@ static struct dentry *__d_find_alias(str
 again:
 	discon_alias = NULL;
 	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
-		spin_lock(&alias->d_lock);
+		seq_spin_lock(&alias->d_lock);
  		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 			if (IS_ROOT(alias) &&
 			    (alias->d_flags & DCACHE_DISCONNECTED)) {
 				discon_alias = alias;
 			} else if (!want_discon) {
 				__dget_dlock(alias);
-				spin_unlock(&alias->d_lock);
+				seq_spin_unlock(&alias->d_lock);
 				return alias;
 			}
 		}
-		spin_unlock(&alias->d_lock);
+		seq_spin_unlock(&alias->d_lock);
 	}
 	if (discon_alias) {
 		alias = discon_alias;
-		spin_lock(&alias->d_lock);
+		seq_spin_lock(&alias->d_lock);
 		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 			if (IS_ROOT(alias) &&
 			    (alias->d_flags & DCACHE_DISCONNECTED)) {
 				__dget_dlock(alias);
-				spin_unlock(&alias->d_lock);
+				seq_spin_unlock(&alias->d_lock);
 				return alias;
 			}
 		}
-		spin_unlock(&alias->d_lock);
+		seq_spin_unlock(&alias->d_lock);
 		goto again;
 	}
 	return NULL;
@@ -699,16 +699,16 @@ void d_prune_aliases(struct inode *inode
 restart:
 	spin_lock(&inode->i_lock);
 	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		if (!dentry->d_count) {
 			__dget_dlock(dentry);
 			__d_drop(dentry);
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			spin_unlock(&inode->i_lock);
 			dput(dentry);
 			goto restart;
 		}
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 	}
 	spin_unlock(&inode->i_lock);
 }
@@ -745,10 +745,10 @@ static void try_prune_one_dentry(struct 
 	/* Prune ancestors. */
 	dentry = parent;
 	while (dentry) {
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		if (dentry->d_count > 1) {
 			dentry->d_count--;
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			return;
 		}
 		dentry = dentry_kill(dentry, 1);
@@ -764,9 +764,9 @@ static void shrink_dentry_list(struct li
 		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
 		if (&dentry->d_lru == list)
 			break; /* empty */
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			continue;
 		}
 
@@ -777,7 +777,7 @@ static void shrink_dentry_list(struct li
 		 */
 		if (dentry->d_count) {
 			dentry_lru_del(dentry);
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			continue;
 		}
 
@@ -815,7 +815,7 @@ relock:
 				struct dentry, d_lru);
 		BUG_ON(dentry->d_sb != sb);
 
-		if (!spin_trylock(&dentry->d_lock)) {
+		if (!seq_spin_trylock(&dentry->d_lock)) {
 			spin_unlock(&dcache_lru_lock);
 			cpu_relax();
 			goto relock;
@@ -824,11 +824,11 @@ relock:
 		if (dentry->d_flags & DCACHE_REFERENCED) {
 			dentry->d_flags &= ~DCACHE_REFERENCED;
 			list_move(&dentry->d_lru, &referenced);
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 		} else {
 			list_move_tail(&dentry->d_lru, &tmp);
 			dentry->d_flags |= DCACHE_SHRINK_LIST;
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			if (!--count)
 				break;
 		}
@@ -981,8 +981,8 @@ static struct dentry *try_to_ascend(stru
 	struct dentry *new = old->d_parent;
 
 	rcu_read_lock();
-	spin_unlock(&old->d_lock);
-	spin_lock(&new->d_lock);
+	seq_spin_unlock(&old->d_lock);
+	seq_spin_lock(&new->d_lock);
 
 	/*
 	 * might go back up the wrong parent if we have had a rename
@@ -991,7 +991,7 @@ static struct dentry *try_to_ascend(stru
 	if (new != old->d_parent ||
 		 (old->d_flags & DCACHE_DISCONNECTED) ||
 		 (!locked && read_seqretry(&rename_lock, seq))) {
-		spin_unlock(&new->d_lock);
+		seq_spin_unlock(&new->d_lock);
 		new = NULL;
 	}
 	rcu_read_unlock();
@@ -1025,7 +1025,7 @@ again:
 
 	if (d_mountpoint(parent))
 		goto positive;
-	spin_lock(&this_parent->d_lock);
+	seq_spin_lock(&this_parent->d_lock);
 repeat:
 	next = this_parent->d_subdirs.next;
 resume:
@@ -1034,21 +1034,21 @@ resume:
 		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
 		next = tmp->next;
 
-		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+		seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 		/* Have we found a mount point ? */
 		if (d_mountpoint(dentry)) {
-			spin_unlock(&dentry->d_lock);
-			spin_unlock(&this_parent->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&this_parent->d_lock);
 			goto positive;
 		}
 		if (!list_empty(&dentry->d_subdirs)) {
-			spin_unlock(&this_parent->d_lock);
-			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
+			seq_spin_unlock(&this_parent->d_lock);
+			spin_release(&dentry->d_lock.lock.dep_map, 1, _RET_IP_);
 			this_parent = dentry;
-			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
+			spin_acquire(&this_parent->d_lock.lock.dep_map, 0, 1, _RET_IP_);
 			goto repeat;
 		}
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 	}
 	/*
 	 * All done at this level ... ascend and resume the search.
@@ -1061,7 +1061,7 @@ resume:
 		next = child->d_u.d_child.next;
 		goto resume;
 	}
-	spin_unlock(&this_parent->d_lock);
+	seq_spin_unlock(&this_parent->d_lock);
 	if (!locked && read_seqretry(&rename_lock, seq))
 		goto rename_retry;
 	if (locked)
@@ -1106,7 +1106,7 @@ static int select_parent(struct dentry *
 	seq = read_seqbegin(&rename_lock);
 again:
 	this_parent = parent;
-	spin_lock(&this_parent->d_lock);
+	seq_spin_lock(&this_parent->d_lock);
 repeat:
 	next = this_parent->d_subdirs.next;
 resume:
@@ -1115,7 +1115,7 @@ resume:
 		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
 		next = tmp->next;
 
-		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+		seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 
 		/*
 		 * move only zero ref count dentries to the dispose list.
@@ -1138,7 +1138,7 @@ resume:
 		 * the rest.
 		 */
 		if (found && need_resched()) {
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			goto out;
 		}
 
@@ -1146,14 +1146,14 @@ resume:
 		 * Descend a level if the d_subdirs list is non-empty.
 		 */
 		if (!list_empty(&dentry->d_subdirs)) {
-			spin_unlock(&this_parent->d_lock);
-			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
+			seq_spin_unlock(&this_parent->d_lock);
+			spin_release(&dentry->d_lock.lock.dep_map, 1, _RET_IP_);
 			this_parent = dentry;
-			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
+			spin_acquire(&this_parent->d_lock.lock.dep_map, 0, 1, _RET_IP_);
 			goto repeat;
 		}
 
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 	}
 	/*
 	 * All done at this level ... ascend and resume the search.
@@ -1167,7 +1167,7 @@ resume:
 		goto resume;
 	}
 out:
-	spin_unlock(&this_parent->d_lock);
+	seq_spin_unlock(&this_parent->d_lock);
 	if (!locked && read_seqretry(&rename_lock, seq))
 		goto rename_retry;
 	if (locked)
@@ -1235,8 +1235,7 @@ struct dentry *__d_alloc(struct super_bl
 
 	dentry->d_count = 1;
 	dentry->d_flags = 0;
-	spin_lock_init(&dentry->d_lock);
-	seqcount_init(&dentry->d_seq);
+	seqlock_init(&dentry->d_lock);
 	dentry->d_inode = NULL;
 	dentry->d_parent = dentry;
 	dentry->d_sb = sb;
@@ -1269,7 +1268,7 @@ struct dentry *d_alloc(struct dentry * p
 	if (!dentry)
 		return NULL;
 
-	spin_lock(&parent->d_lock);
+	seq_spin_lock(&parent->d_lock);
 	/*
 	 * don't need child lock because it is not subject
 	 * to concurrency here
@@ -1277,7 +1276,7 @@ struct dentry *d_alloc(struct dentry * p
 	__dget_dlock(parent);
 	dentry->d_parent = parent;
 	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
-	spin_unlock(&parent->d_lock);
+	seq_spin_unlock(&parent->d_lock);
 
 	return dentry;
 }
@@ -1329,7 +1328,7 @@ EXPORT_SYMBOL(d_set_d_op);
 
 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (inode) {
 		if (unlikely(IS_AUTOMOUNT(inode)))
 			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
@@ -1337,7 +1336,7 @@ static void __d_instantiate(struct dentr
 	}
 	dentry->d_inode = inode;
 	dentry_rcuwalk_barrier(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	fsnotify_d_instantiate(dentry, inode);
 }
 
@@ -1561,14 +1560,14 @@ struct dentry *d_obtain_alias(struct ino
 	}
 
 	/* attach a disconnected dentry */
-	spin_lock(&tmp->d_lock);
+	seq_spin_lock(&tmp->d_lock);
 	tmp->d_inode = inode;
 	tmp->d_flags |= DCACHE_DISCONNECTED;
 	list_add(&tmp->d_alias, &inode->i_dentry);
 	hlist_bl_lock(&tmp->d_sb->s_anon);
 	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
 	hlist_bl_unlock(&tmp->d_sb->s_anon);
-	spin_unlock(&tmp->d_lock);
+	seq_spin_unlock(&tmp->d_lock);
 	spin_unlock(&inode->i_lock);
 	security_d_instantiate(tmp, inode);
 
@@ -1778,7 +1777,7 @@ struct dentry *__d_lookup_rcu(const stru
 			continue;
 
 seqretry:
-		seq = read_seqcount_begin(&dentry->d_seq);
+		seq = read_seqbegin(&dentry->d_lock);
 		if (dentry->d_parent != parent)
 			continue;
 		if (d_unhashed(dentry))
@@ -1793,7 +1792,7 @@ seqretry:
 		 * edge of memory when walking. If we could load this
 		 * atomically some other way, we could drop this check.
 		 */
-		if (read_seqcount_retry(&dentry->d_seq, seq))
+		if (read_seqretry(&dentry->d_lock, seq))
 			goto seqretry;
 		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
 			if (parent->d_op->d_compare(parent, *inode,
@@ -1897,7 +1896,7 @@ struct dentry *__d_lookup(struct dentry 
 		if (dentry->d_name.hash != hash)
 			continue;
 
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		if (dentry->d_parent != parent)
 			goto next;
 		if (d_unhashed(dentry))
@@ -1921,10 +1920,10 @@ struct dentry *__d_lookup(struct dentry 
 
 		dentry->d_count++;
 		found = dentry;
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		break;
 next:
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
  	}
  	rcu_read_unlock();
 
@@ -1972,17 +1971,17 @@ int d_validate(struct dentry *dentry, st
 {
 	struct dentry *child;
 
-	spin_lock(&dparent->d_lock);
+	seq_spin_lock(&dparent->d_lock);
 	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
 		if (dentry == child) {
-			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+			spin_lock_nested(&dentry->d_lock.lock, DENTRY_D_LOCK_NESTED);
 			__dget_dlock(dentry);
-			spin_unlock(&dentry->d_lock);
-			spin_unlock(&dparent->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dparent->d_lock);
 			return 1;
 		}
 	}
-	spin_unlock(&dparent->d_lock);
+	seq_spin_unlock(&dparent->d_lock);
 
 	return 0;
 }
@@ -2017,12 +2016,12 @@ void d_delete(struct dentry * dentry)
 	 * Are we the only user?
 	 */
 again:
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	inode = dentry->d_inode;
 	isdir = S_ISDIR(inode->i_mode);
 	if (dentry->d_count == 1) {
 		if (inode && !spin_trylock(&inode->i_lock)) {
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			cpu_relax();
 			goto again;
 		}
@@ -2035,7 +2034,7 @@ again:
 	if (!d_unhashed(dentry))
 		__d_drop(dentry);
 
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 
 	fsnotify_nameremove(dentry, isdir);
 }
@@ -2064,9 +2063,9 @@ static void _d_rehash(struct dentry * en
  
 void d_rehash(struct dentry * entry)
 {
-	spin_lock(&entry->d_lock);
+	seq_spin_lock(&entry->d_lock);
 	_d_rehash(entry);
-	spin_unlock(&entry->d_lock);
+	seq_spin_unlock(&entry->d_lock);
 }
 EXPORT_SYMBOL(d_rehash);
 
@@ -2089,11 +2088,9 @@ void dentry_update_name_case(struct dent
 	BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
 	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
 
-	spin_lock(&dentry->d_lock);
-	write_seqcount_begin(&dentry->d_seq);
+	write_seqlock(&dentry->d_lock);
 	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
-	write_seqcount_end(&dentry->d_seq);
-	spin_unlock(&dentry->d_lock);
+	write_sequnlock(&dentry->d_lock);
 }
 EXPORT_SYMBOL(dentry_update_name_case);
 
@@ -2144,24 +2141,24 @@ static void dentry_lock_for_move(struct 
 	 * XXXX: do we really need to take target->d_lock?
 	 */
 	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
-		spin_lock(&target->d_parent->d_lock);
+		seq_spin_lock(&target->d_parent->d_lock);
 	else {
 		if (d_ancestor(dentry->d_parent, target->d_parent)) {
-			spin_lock(&dentry->d_parent->d_lock);
-			spin_lock_nested(&target->d_parent->d_lock,
-						DENTRY_D_LOCK_NESTED);
+			seq_spin_lock(&dentry->d_parent->d_lock);
+			seq_spin_lock_nested(&target->d_parent->d_lock,
+					    DENTRY_D_LOCK_NESTED);
 		} else {
-			spin_lock(&target->d_parent->d_lock);
-			spin_lock_nested(&dentry->d_parent->d_lock,
-						DENTRY_D_LOCK_NESTED);
+			seq_spin_lock(&target->d_parent->d_lock);
+			seq_spin_lock_nested(&dentry->d_parent->d_lock,
+					    DENTRY_D_LOCK_NESTED);
 		}
 	}
 	if (target < dentry) {
-		spin_lock_nested(&target->d_lock, 2);
-		spin_lock_nested(&dentry->d_lock, 3);
+		seq_spin_lock_nested(&target->d_lock, 2);
+		seq_spin_lock_nested(&dentry->d_lock, 3);
 	} else {
-		spin_lock_nested(&dentry->d_lock, 2);
-		spin_lock_nested(&target->d_lock, 3);
+		seq_spin_lock_nested(&dentry->d_lock, 2);
+		seq_spin_lock_nested(&target->d_lock, 3);
 	}
 }
 
@@ -2169,9 +2166,9 @@ static void dentry_unlock_parents_for_mo
 					struct dentry *target)
 {
 	if (target->d_parent != dentry->d_parent)
-		spin_unlock(&dentry->d_parent->d_lock);
+		seq_spin_unlock(&dentry->d_parent->d_lock);
 	if (target->d_parent != target)
-		spin_unlock(&target->d_parent->d_lock);
+		seq_spin_unlock(&target->d_parent->d_lock);
 }
 
 /*
@@ -2205,8 +2202,8 @@ static void __d_move(struct dentry * den
 
 	dentry_lock_for_move(dentry, target);
 
-	write_seqcount_begin(&dentry->d_seq);
-	write_seqcount_begin(&target->d_seq);
+	write_seqlock_begin(&dentry->d_lock);
+	write_seqlock_begin(&target->d_lock);
 
 	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
 
@@ -2241,13 +2238,13 @@ static void __d_move(struct dentry * den
 
 	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
 
-	write_seqcount_end(&target->d_seq);
-	write_seqcount_end(&dentry->d_seq);
+	write_seqlock_end(&target->d_lock);
+	write_seqlock_end(&dentry->d_lock);
 
 	dentry_unlock_parents_for_move(dentry, target);
-	spin_unlock(&target->d_lock);
+	seq_spin_unlock(&target->d_lock);
 	fsnotify_d_move(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 /*
@@ -2336,8 +2333,8 @@ static void __d_materialise_dentry(struc
 
 	dentry_lock_for_move(anon, dentry);
 
-	write_seqcount_begin(&dentry->d_seq);
-	write_seqcount_begin(&anon->d_seq);
+	write_seqlock_begin(&dentry->d_lock);
+	write_seqlock_begin(&anon->d_lock);
 
 	dparent = dentry->d_parent;
 	aparent = anon->d_parent;
@@ -2359,11 +2356,11 @@ static void __d_materialise_dentry(struc
 	else
 		INIT_LIST_HEAD(&anon->d_u.d_child);
 
-	write_seqcount_end(&dentry->d_seq);
-	write_seqcount_end(&anon->d_seq);
+	write_seqlock_end(&dentry->d_lock);
+	write_seqlock_end(&anon->d_lock);
 
 	dentry_unlock_parents_for_move(anon, dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 
 	/* anon->d_lock still locked, returns locked */
 	anon->d_flags &= ~DCACHE_DISCONNECTED;
@@ -2439,10 +2436,10 @@ struct dentry *d_materialise_unique(stru
 	else
 		BUG_ON(!d_unhashed(actual));
 
-	spin_lock(&actual->d_lock);
+	seq_spin_lock(&actual->d_lock);
 found:
 	_d_rehash(actual);
-	spin_unlock(&actual->d_lock);
+	seq_spin_unlock(&actual->d_lock);
 	spin_unlock(&inode->i_lock);
 out_nolock:
 	if (actual == dentry) {
@@ -2504,9 +2501,9 @@ static int prepend_path(const struct pat
 		}
 		parent = dentry->d_parent;
 		prefetch(parent);
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		error = prepend_name(buffer, buflen, &dentry->d_name);
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		if (!error)
 			error = prepend(buffer, buflen, "/", 1);
 		if (error)
@@ -2731,9 +2728,9 @@ static char *__dentry_path(struct dentry
 		int error;
 
 		prefetch(parent);
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		error = prepend_name(&end, &buflen, &dentry->d_name);
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
 			goto Elong;
 
@@ -2898,7 +2895,7 @@ void d_genocide(struct dentry *root)
 	seq = read_seqbegin(&rename_lock);
 again:
 	this_parent = root;
-	spin_lock(&this_parent->d_lock);
+	seq_spin_lock(&this_parent->d_lock);
 repeat:
 	next = this_parent->d_subdirs.next;
 resume:
@@ -2907,23 +2904,23 @@ resume:
 		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
 		next = tmp->next;
 
-		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+		seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 		if (d_unhashed(dentry) || !dentry->d_inode) {
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			continue;
 		}
 		if (!list_empty(&dentry->d_subdirs)) {
-			spin_unlock(&this_parent->d_lock);
-			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
+			seq_spin_unlock(&this_parent->d_lock);
+			spin_release(&dentry->d_lock.lock.dep_map, 1, _RET_IP_);
 			this_parent = dentry;
-			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
+			spin_acquire(&this_parent->d_lock.lock.dep_map, 0, 1, _RET_IP_);
 			goto repeat;
 		}
 		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
 			dentry->d_flags |= DCACHE_GENOCIDE;
 			dentry->d_count--;
 		}
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 	}
 	if (this_parent != root) {
 		struct dentry *child = this_parent;
@@ -2937,7 +2934,7 @@ resume:
 		next = child->d_u.d_child.next;
 		goto resume;
 	}
-	spin_unlock(&this_parent->d_lock);
+	seq_spin_unlock(&this_parent->d_lock);
 	if (!locked && read_seqretry(&rename_lock, seq))
 		goto rename_retry;
 	if (locked)
Index: tip/fs/dcookies.c
===================================================================
--- tip.orig/fs/dcookies.c
+++ tip/fs/dcookies.c
@@ -98,9 +98,9 @@ static struct dcookie_struct *alloc_dcoo
 		return NULL;
 
 	d = path->dentry;
-	spin_lock(&d->d_lock);
+	seq_spin_lock(&d->d_lock);
 	d->d_flags |= DCACHE_COOKIE;
-	spin_unlock(&d->d_lock);
+	seq_spin_unlock(&d->d_lock);
 
 	dcs->path = *path;
 	path_get(path);
@@ -267,9 +267,9 @@ static void free_dcookie(struct dcookie_
 {
 	struct dentry *d = dcs->path.dentry;
 
-	spin_lock(&d->d_lock);
+	seq_spin_lock(&d->d_lock);
 	d->d_flags &= ~DCACHE_COOKIE;
-	spin_unlock(&d->d_lock);
+	seq_spin_unlock(&d->d_lock);
 
 	path_put(&dcs->path);
 	kmem_cache_free(dcookie_cache, dcs);
Index: tip/fs/exportfs/expfs.c
===================================================================
--- tip.orig/fs/exportfs/expfs.c
+++ tip/fs/exportfs/expfs.c
@@ -114,15 +114,15 @@ reconnect_path(struct vfsmount *mnt, str
 
 		if (!IS_ROOT(pd)) {
 			/* must have found a connected parent - great */
-			spin_lock(&pd->d_lock);
+			seq_spin_lock(&pd->d_lock);
 			pd->d_flags &= ~DCACHE_DISCONNECTED;
-			spin_unlock(&pd->d_lock);
+			seq_spin_unlock(&pd->d_lock);
 			noprogress = 0;
 		} else if (pd == mnt->mnt_sb->s_root) {
 			printk(KERN_ERR "export: Eeek filesystem root is not connected, impossible\n");
-			spin_lock(&pd->d_lock);
+			seq_spin_lock(&pd->d_lock);
 			pd->d_flags &= ~DCACHE_DISCONNECTED;
-			spin_unlock(&pd->d_lock);
+			seq_spin_unlock(&pd->d_lock);
 			noprogress = 0;
 		} else {
 			/*
@@ -335,11 +335,11 @@ static int export_encode_fh(struct dentr
 	if (connectable && !S_ISDIR(inode->i_mode)) {
 		struct inode *parent;
 
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		parent = dentry->d_parent->d_inode;
 		fid->i32.parent_ino = parent->i_ino;
 		fid->i32.parent_gen = parent->i_generation;
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		len = 4;
 		type = FILEID_INO32_GEN_PARENT;
 	}
Index: tip/fs/fat/inode.c
===================================================================
--- tip.orig/fs/fat/inode.c
+++ tip/fs/fat/inode.c
@@ -771,9 +771,9 @@ fat_encode_fh(struct dentry *de, __u32 *
 	fh[1] = inode->i_generation;
 	fh[2] = ipos_h;
 	fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
-	spin_lock(&de->d_lock);
+	seq_spin_lock(&de->d_lock);
 	fh[4] = ipos_l | MSDOS_I(de->d_parent->d_inode)->i_logstart;
-	spin_unlock(&de->d_lock);
+	seq_spin_unlock(&de->d_lock);
 	return 3;
 }
 
Index: tip/fs/fat/namei_vfat.c
===================================================================
--- tip.orig/fs/fat/namei_vfat.c
+++ tip/fs/fat/namei_vfat.c
@@ -34,10 +34,10 @@
 static int vfat_revalidate_shortname(struct dentry *dentry)
 {
 	int ret = 1;
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (dentry->d_time != dentry->d_parent->d_inode->i_version)
 		ret = 0;
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	return ret;
 }
 
Index: tip/fs/fs-writeback.c
===================================================================
--- tip.orig/fs/fs-writeback.c
+++ tip/fs/fs-writeback.c
@@ -1019,7 +1019,7 @@ static noinline void block_dump___mark_i
 
 		dentry = d_find_alias(inode);
 		if (dentry) {
-			spin_lock(&dentry->d_lock);
+			seq_spin_lock(&dentry->d_lock);
 			name = (const char *) dentry->d_name.name;
 		}
 		printk(KERN_DEBUG
@@ -1027,7 +1027,7 @@ static noinline void block_dump___mark_i
 		       current->comm, task_pid_nr(current), inode->i_ino,
 		       name, inode->i_sb->s_id);
 		if (dentry) {
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			dput(dentry);
 		}
 	}
Index: tip/fs/fuse/inode.c
===================================================================
--- tip.orig/fs/fuse/inode.c
+++ tip/fs/fuse/inode.c
@@ -651,11 +651,11 @@ static int fuse_encode_fh(struct dentry 
 	if (encode_parent) {
 		struct inode *parent;
 
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		parent = dentry->d_parent->d_inode;
 		nodeid = get_fuse_inode(parent)->nodeid;
 		generation = parent->i_generation;
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 
 		fh[3] = (u32)(nodeid >> 32);
 		fh[4] = (u32)(nodeid & 0xffffffff);
Index: tip/fs/gfs2/export.c
===================================================================
--- tip.orig/fs/gfs2/export.c
+++ tip/fs/gfs2/export.c
@@ -53,11 +53,11 @@ static int gfs2_encode_fh(struct dentry 
 	if (!connectable || inode == sb->s_root->d_inode)
 		return *len;
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	inode = dentry->d_parent->d_inode;
 	ip = GFS2_I(inode);
 	igrab(inode);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 
 	fh[4] = cpu_to_be32(ip->i_no_formal_ino >> 32);
 	fh[5] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF);
Index: tip/fs/isofs/export.c
===================================================================
--- tip.orig/fs/isofs/export.c
+++ tip/fs/isofs/export.c
@@ -139,13 +139,13 @@ isofs_export_encode_fh(struct dentry *de
 	if (connectable && !S_ISDIR(inode->i_mode)) {
 		struct inode *parent;
 		struct iso_inode_info *eparent;
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		parent = dentry->d_parent->d_inode;
 		eparent = ISOFS_I(parent);
 		fh32[3] = eparent->i_iget5_block;
 		fh16[3] = (__u16)eparent->i_iget5_offset;  /* fh16 [sic] */
 		fh32[4] = parent->i_generation;
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		len = 5;
 		type = 2;
 	}
Index: tip/fs/libfs.c
===================================================================
--- tip.orig/fs/libfs.c
+++ tip/fs/libfs.c
@@ -102,21 +102,21 @@ loff_t dcache_dir_lseek(struct file *fil
 			struct dentry *cursor = file->private_data;
 			loff_t n = file->f_pos - 2;
 
-			spin_lock(&dentry->d_lock);
+			seq_spin_lock(&dentry->d_lock);
 			/* d_lock not required for cursor */
 			list_del(&cursor->d_u.d_child);
 			p = dentry->d_subdirs.next;
 			while (n && p != &dentry->d_subdirs) {
 				struct dentry *next;
 				next = list_entry(p, struct dentry, d_u.d_child);
-				spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+				seq_spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
 				if (simple_positive(next))
 					n--;
-				spin_unlock(&next->d_lock);
+				seq_spin_unlock(&next->d_lock);
 				p = p->next;
 			}
 			list_add_tail(&cursor->d_u.d_child, p);
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 		}
 	}
 	mutex_unlock(&dentry->d_inode->i_mutex);
@@ -159,35 +159,35 @@ int dcache_readdir(struct file * filp, v
 			i++;
 			/* fallthrough */
 		default:
-			spin_lock(&dentry->d_lock);
+			seq_spin_lock(&dentry->d_lock);
 			if (filp->f_pos == 2)
 				list_move(q, &dentry->d_subdirs);
 
 			for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
 				struct dentry *next;
 				next = list_entry(p, struct dentry, d_u.d_child);
-				spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+				seq_spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
 				if (!simple_positive(next)) {
-					spin_unlock(&next->d_lock);
+					seq_spin_unlock(&next->d_lock);
 					continue;
 				}
 
-				spin_unlock(&next->d_lock);
-				spin_unlock(&dentry->d_lock);
+				seq_spin_unlock(&next->d_lock);
+				seq_spin_unlock(&dentry->d_lock);
 				if (filldir(dirent, next->d_name.name, 
 					    next->d_name.len, filp->f_pos, 
 					    next->d_inode->i_ino, 
 					    dt_type(next->d_inode)) < 0)
 					return 0;
-				spin_lock(&dentry->d_lock);
-				spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+				seq_spin_lock(&dentry->d_lock);
+				seq_spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
 				/* next is still alive */
 				list_move(q, p);
-				spin_unlock(&next->d_lock);
+				seq_spin_unlock(&next->d_lock);
 				p = q;
 				filp->f_pos++;
 			}
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 	}
 	return 0;
 }
@@ -281,18 +281,18 @@ int simple_empty(struct dentry *dentry)
 	struct dentry *child;
 	int ret = 0;
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
-		spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
+		seq_spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
 		if (simple_positive(child)) {
-			spin_unlock(&child->d_lock);
+			seq_spin_unlock(&child->d_lock);
 			goto out;
 		}
-		spin_unlock(&child->d_lock);
+		seq_spin_unlock(&child->d_lock);
 	}
 	ret = 1;
 out:
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	return ret;
 }
 
Index: tip/fs/namei.c
===================================================================
--- tip.orig/fs/namei.c
+++ tip/fs/namei.c
@@ -432,7 +432,7 @@ static int unlazy_walk(struct nameidata 
 				nd->root.dentry != fs->root.dentry)
 			goto err_root;
 	}
-	spin_lock(&parent->d_lock);
+	seq_spin_lock(&parent->d_lock);
 	if (!dentry) {
 		if (!__d_rcu_to_refcount(parent, nd->seq))
 			goto err_parent;
@@ -440,7 +440,7 @@ static int unlazy_walk(struct nameidata 
 	} else {
 		if (dentry->d_parent != parent)
 			goto err_parent;
-		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+		seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 		if (!__d_rcu_to_refcount(dentry, nd->seq))
 			goto err_child;
 		/*
@@ -452,9 +452,9 @@ static int unlazy_walk(struct nameidata 
 		BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
 		BUG_ON(!parent->d_count);
 		parent->d_count++;
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 	}
-	spin_unlock(&parent->d_lock);
+	seq_spin_unlock(&parent->d_lock);
 	if (want_root) {
 		path_get(&nd->root);
 		seq_spin_unlock(&fs->lock);
@@ -467,9 +467,9 @@ static int unlazy_walk(struct nameidata 
 	return 0;
 
 err_child:
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 err_parent:
-	spin_unlock(&parent->d_lock);
+	seq_spin_unlock(&parent->d_lock);
 err_root:
 	if (want_root)
 		seq_spin_unlock(&fs->lock);
@@ -516,15 +516,15 @@ static int complete_walk(struct nameidat
 		nd->flags &= ~LOOKUP_RCU;
 		if (!(nd->flags & LOOKUP_ROOT))
 			nd->root.mnt = NULL;
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			rcu_read_unlock();
 			br_read_unlock(vfsmount_lock);
 			return -ECHILD;
 		}
 		BUG_ON(nd->inode != dentry->d_inode);
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		mntget(nd->path.mnt);
 		rcu_read_unlock();
 		br_read_unlock(vfsmount_lock);
@@ -568,7 +568,7 @@ static __always_inline void set_root_rcu
 		do {
 			seq = read_seqbegin(&fs->lock);
 			nd->root = fs->root;
-			nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
+			nd->seq = __read_seqbegin(&nd->root.dentry->d_lock);
 		} while (read_seqretry(&fs->lock, seq));
 	}
 }
@@ -902,7 +902,7 @@ static bool __follow_mount_rcu(struct na
 		path->mnt = &mounted->mnt;
 		path->dentry = mounted->mnt.mnt_root;
 		nd->flags |= LOOKUP_JUMPED;
-		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
+		nd->seq = read_seqbegin(&path->dentry->d_lock);
 		/*
 		 * Update the inode too. We don't need to re-check the
 		 * dentry sequence number here after this d_inode read,
@@ -922,7 +922,7 @@ static void follow_mount_rcu(struct name
 			break;
 		nd->path.mnt = &mounted->mnt;
 		nd->path.dentry = mounted->mnt.mnt_root;
-		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+		nd->seq = read_seqbegin(&nd->path.dentry->d_lock);
 	}
 }
 
@@ -940,8 +940,8 @@ static int follow_dotdot_rcu(struct name
 			struct dentry *parent = old->d_parent;
 			unsigned seq;
 
-			seq = read_seqcount_begin(&parent->d_seq);
-			if (read_seqcount_retry(&old->d_seq, nd->seq))
+			seq = read_seqbegin(&parent->d_lock);
+			if (read_seqretry(&old->d_lock, nd->seq))
 				goto failed;
 			nd->path.dentry = parent;
 			nd->seq = seq;
@@ -949,7 +949,7 @@ static int follow_dotdot_rcu(struct name
 		}
 		if (!follow_up_rcu(&nd->path))
 			break;
-		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+		nd->seq = read_seqbegin(&nd->path.dentry->d_lock);
 	}
 	follow_mount_rcu(nd);
 	nd->inode = nd->path.dentry->d_inode;
@@ -1135,7 +1135,7 @@ static int do_lookup(struct nameidata *n
 			goto unlazy;
 
 		/* Memory barrier in read_seqcount_begin of child is enough */
-		if (__read_seqcount_retry(&parent->d_seq, nd->seq))
+		if (__read_seqretry(&parent->d_lock, nd->seq))
 			return -ECHILD;
 		nd->seq = seq;
 
@@ -1517,7 +1517,7 @@ static int path_init(int dfd, const char
 		if (flags & LOOKUP_RCU) {
 			br_read_lock(vfsmount_lock);
 			rcu_read_lock();
-			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+			nd->seq = __read_seqbegin(&nd->path.dentry->d_lock);
 		} else {
 			path_get(&nd->path);
 		}
@@ -1547,7 +1547,7 @@ static int path_init(int dfd, const char
 			do {
 				seq = read_seqbegin(&fs->lock);
 				nd->path = fs->pwd;
-				nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+				nd->seq = __read_seqbegin(&nd->path.dentry->d_lock);
 			} while (read_seqretry(&fs->lock, seq));
 		} else {
 			get_fs_pwd(current->fs, &nd->path);
@@ -1576,7 +1576,7 @@ static int path_init(int dfd, const char
 		if (flags & LOOKUP_RCU) {
 			if (fput_needed)
 				*fp = file;
-			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+			nd->seq = __read_seqbegin(&nd->path.dentry->d_lock);
 			br_read_lock(vfsmount_lock);
 			rcu_read_lock();
 		} else {
@@ -2638,10 +2638,10 @@ SYSCALL_DEFINE2(mkdir, const char __user
 void dentry_unhash(struct dentry *dentry)
 {
 	shrink_dcache_parent(dentry);
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (dentry->d_count == 1)
 		__d_drop(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 int vfs_rmdir(struct inode *dir, struct dentry *dentry)
Index: tip/fs/namespace.c
===================================================================
--- tip.orig/fs/namespace.c
+++ tip/fs/namespace.c
@@ -577,9 +577,9 @@ static void dentry_reset_mounted(struct 
 				return;
 		}
 	}
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	dentry->d_flags &= ~DCACHE_MOUNTED;
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 /*
@@ -605,9 +605,9 @@ void mnt_set_mountpoint(struct mount *mn
 	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
 	child_mnt->mnt_mountpoint = dget(dentry);
 	child_mnt->mnt_parent = mnt;
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	dentry->d_flags |= DCACHE_MOUNTED;
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 /*
Index: tip/fs/ncpfs/dir.c
===================================================================
--- tip.orig/fs/ncpfs/dir.c
+++ tip/fs/ncpfs/dir.c
@@ -388,7 +388,7 @@ ncp_dget_fpos(struct dentry *dentry, str
 	}
 
 	/* If a pointer is invalid, we search the dentry. */
-	spin_lock(&parent->d_lock);
+	seq_spin_lock(&parent->d_lock);
 	next = parent->d_subdirs.next;
 	while (next != &parent->d_subdirs) {
 		dent = list_entry(next, struct dentry, d_u.d_child);
@@ -397,12 +397,12 @@ ncp_dget_fpos(struct dentry *dentry, str
 				dget(dent);
 			else
 				dent = NULL;
-			spin_unlock(&parent->d_lock);
+			seq_spin_unlock(&parent->d_lock);
 			goto out;
 		}
 		next = next->next;
 	}
-	spin_unlock(&parent->d_lock);
+	seq_spin_unlock(&parent->d_lock);
 	return NULL;
 
 out:
Index: tip/fs/ncpfs/ncplib_kernel.h
===================================================================
--- tip.orig/fs/ncpfs/ncplib_kernel.h
+++ tip/fs/ncpfs/ncplib_kernel.h
@@ -191,7 +191,7 @@ ncp_renew_dentries(struct dentry *parent
 	struct list_head *next;
 	struct dentry *dentry;
 
-	spin_lock(&parent->d_lock);
+	seq_spin_lock(&parent->d_lock);
 	next = parent->d_subdirs.next;
 	while (next != &parent->d_subdirs) {
 		dentry = list_entry(next, struct dentry, d_u.d_child);
@@ -203,7 +203,7 @@ ncp_renew_dentries(struct dentry *parent
 
 		next = next->next;
 	}
-	spin_unlock(&parent->d_lock);
+	seq_spin_unlock(&parent->d_lock);
 }
 
 static inline void
@@ -213,7 +213,7 @@ ncp_invalidate_dircache_entries(struct d
 	struct list_head *next;
 	struct dentry *dentry;
 
-	spin_lock(&parent->d_lock);
+	seq_spin_lock(&parent->d_lock);
 	next = parent->d_subdirs.next;
 	while (next != &parent->d_subdirs) {
 		dentry = list_entry(next, struct dentry, d_u.d_child);
@@ -221,7 +221,7 @@ ncp_invalidate_dircache_entries(struct d
 		ncp_age_dentry(server, dentry);
 		next = next->next;
 	}
-	spin_unlock(&parent->d_lock);
+	seq_spin_unlock(&parent->d_lock);
 }
 
 struct ncp_cache_head {
Index: tip/fs/nfs/dir.c
===================================================================
--- tip.orig/fs/nfs/dir.c
+++ tip/fs/nfs/dir.c
@@ -1812,9 +1812,9 @@ static int nfs_unlink(struct inode *dir,
 	dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id,
 		dir->i_ino, dentry->d_name.name);
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (dentry->d_count > 1) {
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		/* Start asynchronous writeout of the inode */
 		write_inode_now(dentry->d_inode, 0);
 		error = nfs_sillyrename(dir, dentry);
@@ -1824,7 +1824,7 @@ static int nfs_unlink(struct inode *dir,
 		__d_drop(dentry);
 		need_rehash = 1;
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	error = nfs_safe_remove(dentry);
 	if (!error || error == -ENOENT) {
 		nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
Index: tip/fs/nfs/getroot.c
===================================================================
--- tip.orig/fs/nfs/getroot.c
+++ tip/fs/nfs/getroot.c
@@ -64,9 +64,9 @@ static int nfs_superblock_set_dummy_root
 		 * Oops, since the test for IS_ROOT() will fail.
 		 */
 		spin_lock(&sb->s_root->d_inode->i_lock);
-		spin_lock(&sb->s_root->d_lock);
+		seq_spin_lock(&sb->s_root->d_lock);
 		list_del_init(&sb->s_root->d_alias);
-		spin_unlock(&sb->s_root->d_lock);
+		seq_spin_unlock(&sb->s_root->d_lock);
 		spin_unlock(&sb->s_root->d_inode->i_lock);
 	}
 	return 0;
@@ -126,12 +126,12 @@ struct dentry *nfs_get_root(struct super
 	}
 
 	security_d_instantiate(ret, inode);
-	spin_lock(&ret->d_lock);
+	seq_spin_lock(&ret->d_lock);
 	if (IS_ROOT(ret) && !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
 		ret->d_fsdata = name;
 		name = NULL;
 	}
-	spin_unlock(&ret->d_lock);
+	seq_spin_unlock(&ret->d_lock);
 out:
 	if (name)
 		kfree(name);
@@ -250,12 +250,12 @@ struct dentry *nfs4_get_root(struct supe
 	}
 
 	security_d_instantiate(ret, inode);
-	spin_lock(&ret->d_lock);
+	seq_spin_lock(&ret->d_lock);
 	if (IS_ROOT(ret) && !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
 		ret->d_fsdata = name;
 		name = NULL;
 	}
-	spin_unlock(&ret->d_lock);
+	seq_spin_unlock(&ret->d_lock);
 out:
 	if (name)
 		kfree(name);
Index: tip/fs/nfs/namespace.c
===================================================================
--- tip.orig/fs/nfs/namespace.c
+++ tip/fs/nfs/namespace.c
@@ -60,7 +60,7 @@ rename_retry:
 	seq = read_seqbegin(&rename_lock);
 	rcu_read_lock();
 	while (1) {
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		if (IS_ROOT(dentry))
 			break;
 		namelen = dentry->d_name.len;
@@ -70,17 +70,17 @@ rename_retry:
 		end -= namelen;
 		memcpy(end, dentry->d_name.name, namelen);
 		*--end = '/';
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		dentry = dentry->d_parent;
 	}
 	if (read_seqretry(&rename_lock, seq)) {
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		rcu_read_unlock();
 		goto rename_retry;
 	}
 	if (*end != '/') {
 		if (--buflen < 0) {
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			rcu_read_unlock();
 			goto Elong;
 		}
@@ -89,7 +89,7 @@ rename_retry:
 	*p = end;
 	base = dentry->d_fsdata;
 	if (!base) {
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		rcu_read_unlock();
 		WARN_ON(1);
 		return end;
@@ -100,17 +100,17 @@ rename_retry:
 		namelen--;
 	buflen -= namelen;
 	if (buflen < 0) {
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		rcu_read_unlock();
 		goto Elong;
 	}
 	end -= namelen;
 	memcpy(end, base, namelen);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	rcu_read_unlock();
 	return end;
 Elong_unlock:
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	rcu_read_unlock();
 	if (read_seqretry(&rename_lock, seq))
 		goto rename_retry;
Index: tip/fs/nfs/unlink.c
===================================================================
--- tip.orig/fs/nfs/unlink.c
+++ tip/fs/nfs/unlink.c
@@ -156,7 +156,7 @@ static int nfs_do_call_unlink(struct den
 		 */
 		nfs_free_dname(data);
 		ret = nfs_copy_dname(alias, data);
-		spin_lock(&alias->d_lock);
+		seq_spin_lock(&alias->d_lock);
 		if (ret == 0 && alias->d_inode != NULL &&
 		    !(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
 			devname_garbage = alias->d_fsdata;
@@ -165,7 +165,7 @@ static int nfs_do_call_unlink(struct den
 			ret = 1;
 		} else
 			ret = 0;
-		spin_unlock(&alias->d_lock);
+		seq_spin_unlock(&alias->d_lock);
 		nfs_dec_sillycount(dir);
 		dput(alias);
 		/*
@@ -274,13 +274,13 @@ nfs_async_unlink(struct inode *dir, stru
 	data->res.dir_attr = &data->dir_attr;
 
 	status = -EBUSY;
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
 		goto out_unlock;
 	dentry->d_flags |= DCACHE_NFSFS_RENAMED;
 	devname_garbage = dentry->d_fsdata;
 	dentry->d_fsdata = data;
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	/*
 	 * If we'd displaced old cached devname, free it.  At that
 	 * point dentry is definitely not a root, so we won't need
@@ -290,7 +290,7 @@ nfs_async_unlink(struct inode *dir, stru
 		kfree(devname_garbage);
 	return 0;
 out_unlock:
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	put_rpccred(data->cred);
 out_free:
 	kfree(data);
@@ -312,13 +312,13 @@ nfs_complete_unlink(struct dentry *dentr
 {
 	struct nfs_unlinkdata	*data = NULL;
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
 		dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
 		data = dentry->d_fsdata;
 		dentry->d_fsdata = NULL;
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 
 	if (data != NULL && (NFS_STALE(inode) || !nfs_call_unlink(dentry, data)))
 		nfs_free_unlinkdata(data);
@@ -328,17 +328,17 @@ nfs_complete_unlink(struct dentry *dentr
 static void
 nfs_cancel_async_unlink(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
 		struct nfs_unlinkdata *data = dentry->d_fsdata;
 
 		dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
 		dentry->d_fsdata = NULL;
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		nfs_free_unlinkdata(data);
 		return;
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 struct nfs_renamedata {
Index: tip/fs/nilfs2/namei.c
===================================================================
--- tip.orig/fs/nilfs2/namei.c
+++ tip/fs/nilfs2/namei.c
@@ -538,11 +538,11 @@ static int nilfs_encode_fh(struct dentry
 	if (connectable && !S_ISDIR(inode->i_mode)) {
 		struct inode *parent;
 
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		parent = dentry->d_parent->d_inode;
 		fid->parent_ino = parent->i_ino;
 		fid->parent_gen = parent->i_generation;
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 
 		type = FILEID_NILFS_WITH_PARENT;
 		*lenp = NILFS_FID_SIZE_CONNECTABLE;
Index: tip/fs/notify/fsnotify.c
===================================================================
--- tip.orig/fs/notify/fsnotify.c
+++ tip/fs/notify/fsnotify.c
@@ -69,19 +69,19 @@ void __fsnotify_update_child_dentry_flag
 		/* run all of the children of the original inode and fix their
 		 * d_flags to indicate parental interest (their parent is the
 		 * original inode) */
-		spin_lock(&alias->d_lock);
+		seq_spin_lock(&alias->d_lock);
 		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
 			if (!child->d_inode)
 				continue;
 
-			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
+			seq_spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
 			if (watched)
 				child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
 			else
 				child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
-			spin_unlock(&child->d_lock);
+			seq_spin_unlock(&child->d_lock);
 		}
-		spin_unlock(&alias->d_lock);
+		seq_spin_unlock(&alias->d_lock);
 	}
 	spin_unlock(&inode->i_lock);
 }
Index: tip/fs/notify/vfsmount_mark.c
===================================================================
--- tip.orig/fs/notify/vfsmount_mark.c
+++ tip/fs/notify/vfsmount_mark.c
@@ -37,13 +37,13 @@ void fsnotify_clear_marks_by_mount(struc
 	struct mount *m = real_mount(mnt);
 	LIST_HEAD(free_list);
 
-	spin_lock(&mnt->mnt_root->d_lock);
+	seq_spin_lock(&mnt->mnt_root->d_lock);
 	hlist_for_each_entry_safe(mark, pos, n, &m->mnt_fsnotify_marks, m.m_list) {
 		list_add(&mark->m.free_m_list, &free_list);
 		hlist_del_init_rcu(&mark->m.m_list);
 		fsnotify_get_mark(mark);
 	}
-	spin_unlock(&mnt->mnt_root->d_lock);
+	seq_spin_unlock(&mnt->mnt_root->d_lock);
 
 	list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) {
 		fsnotify_destroy_mark(mark);
@@ -66,7 +66,7 @@ static void fsnotify_recalc_vfsmount_mas
 	struct hlist_node *pos;
 	__u32 new_mask = 0;
 
-	assert_spin_locked(&mnt->mnt_root->d_lock);
+	assert_seq_spin_locked(&mnt->mnt_root->d_lock);
 
 	hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list)
 		new_mask |= mark->mask;
@@ -79,9 +79,9 @@ static void fsnotify_recalc_vfsmount_mas
  */
 void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt)
 {
-	spin_lock(&mnt->mnt_root->d_lock);
+	seq_spin_lock(&mnt->mnt_root->d_lock);
 	fsnotify_recalc_vfsmount_mask_locked(mnt);
-	spin_unlock(&mnt->mnt_root->d_lock);
+	seq_spin_unlock(&mnt->mnt_root->d_lock);
 }
 
 void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
@@ -91,14 +91,14 @@ void fsnotify_destroy_vfsmount_mark(stru
 	assert_spin_locked(&mark->lock);
 	assert_spin_locked(&mark->group->mark_lock);
 
-	spin_lock(&mnt->mnt_root->d_lock);
+	seq_spin_lock(&mnt->mnt_root->d_lock);
 
 	hlist_del_init_rcu(&mark->m.m_list);
 	mark->m.mnt = NULL;
 
 	fsnotify_recalc_vfsmount_mask_locked(mnt);
 
-	spin_unlock(&mnt->mnt_root->d_lock);
+	seq_spin_unlock(&mnt->mnt_root->d_lock);
 }
 
 static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,
@@ -108,7 +108,7 @@ static struct fsnotify_mark *fsnotify_fi
 	struct fsnotify_mark *mark;
 	struct hlist_node *pos;
 
-	assert_spin_locked(&mnt->mnt_root->d_lock);
+	assert_seq_spin_locked(&mnt->mnt_root->d_lock);
 
 	hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list) {
 		if (mark->group == group) {
@@ -128,9 +128,9 @@ struct fsnotify_mark *fsnotify_find_vfsm
 {
 	struct fsnotify_mark *mark;
 
-	spin_lock(&mnt->mnt_root->d_lock);
+	seq_spin_lock(&mnt->mnt_root->d_lock);
 	mark = fsnotify_find_vfsmount_mark_locked(group, mnt);
-	spin_unlock(&mnt->mnt_root->d_lock);
+	seq_spin_unlock(&mnt->mnt_root->d_lock);
 
 	return mark;
 }
@@ -154,7 +154,7 @@ int fsnotify_add_vfsmount_mark(struct fs
 	assert_spin_locked(&mark->lock);
 	assert_spin_locked(&group->mark_lock);
 
-	spin_lock(&mnt->mnt_root->d_lock);
+	seq_spin_lock(&mnt->mnt_root->d_lock);
 
 	mark->m.mnt = mnt;
 
@@ -189,7 +189,7 @@ int fsnotify_add_vfsmount_mark(struct fs
 	hlist_add_after_rcu(last, &mark->m.m_list);
 out:
 	fsnotify_recalc_vfsmount_mask_locked(mnt);
-	spin_unlock(&mnt->mnt_root->d_lock);
+	seq_spin_unlock(&mnt->mnt_root->d_lock);
 
 	return ret;
 }
Index: tip/fs/ocfs2/dcache.c
===================================================================
--- tip.orig/fs/ocfs2/dcache.c
+++ tip/fs/ocfs2/dcache.c
@@ -177,16 +177,16 @@ struct dentry *ocfs2_find_local_alias(st
 	list_for_each(p, &inode->i_dentry) {
 		dentry = list_entry(p, struct dentry, d_alias);
 
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
 			trace_ocfs2_find_local_alias(dentry->d_name.len,
 						     dentry->d_name.name);
 
 			dget_dlock(dentry);
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			break;
 		}
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 
 		dentry = NULL;
 	}
Index: tip/fs/ocfs2/export.c
===================================================================
--- tip.orig/fs/ocfs2/export.c
+++ tip/fs/ocfs2/export.c
@@ -214,7 +214,7 @@ static int ocfs2_encode_fh(struct dentry
 	if (connectable && !S_ISDIR(inode->i_mode)) {
 		struct inode *parent;
 
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 
 		parent = dentry->d_parent->d_inode;
 		blkno = OCFS2_I(parent)->ip_blkno;
@@ -224,7 +224,7 @@ static int ocfs2_encode_fh(struct dentry
 		fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff));
 		fh[5] = cpu_to_le32(generation);
 
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 
 		len = 6;
 		type = 2;
Index: tip/fs/reiserfs/inode.c
===================================================================
--- tip.orig/fs/reiserfs/inode.c
+++ tip/fs/reiserfs/inode.c
@@ -1614,7 +1614,7 @@ int reiserfs_encode_fh(struct dentry *de
 	if (maxlen < 5 || !need_parent)
 		return 3;
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	inode = dentry->d_parent->d_inode;
 	data[3] = inode->i_ino;
 	data[4] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
@@ -1623,7 +1623,7 @@ int reiserfs_encode_fh(struct dentry *de
 		data[5] = inode->i_generation;
 		*lenp = 6;
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	return *lenp;
 }
 
Index: tip/fs/udf/namei.c
===================================================================
--- tip.orig/fs/udf/namei.c
+++ tip/fs/udf/namei.c
@@ -1296,13 +1296,13 @@ static int udf_encode_fh(struct dentry *
 	fid->udf.generation = inode->i_generation;
 
 	if (connectable && !S_ISDIR(inode->i_mode)) {
-		spin_lock(&de->d_lock);
+		seq_spin_lock(&de->d_lock);
 		inode = de->d_parent->d_inode;
 		location = UDF_I(inode)->i_location;
 		fid->udf.parent_block = location.logicalBlockNum;
 		fid->udf.parent_partref = location.partitionReferenceNum;
 		fid->udf.parent_generation = inode->i_generation;
-		spin_unlock(&de->d_lock);
+		seq_spin_unlock(&de->d_lock);
 		*lenp = 5;
 		type = FILEID_UDF_WITH_PARENT;
 	}
Index: tip/fs/xfs/xfs_export.c
===================================================================
--- tip.orig/fs/xfs/xfs_export.c
+++ tip/fs/xfs/xfs_export.c
@@ -97,20 +97,20 @@ xfs_fs_encode_fh(
 
 	switch (fileid_type) {
 	case FILEID_INO32_GEN_PARENT:
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
 		fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		/*FALLTHRU*/
 	case FILEID_INO32_GEN:
 		fid->i32.ino = XFS_I(inode)->i_ino;
 		fid->i32.gen = inode->i_generation;
 		break;
 	case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
 		fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 		/*FALLTHRU*/
 	case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
 		fid64->ino = XFS_I(inode)->i_ino;
Index: tip/include/linux/dcache.h
===================================================================
--- tip.orig/include/linux/dcache.h
+++ tip/include/linux/dcache.h
@@ -88,7 +88,8 @@ extern unsigned int full_name_hash(const
 struct dentry {
 	/* RCU lookup touched fields */
 	unsigned int d_flags;		/* protected by d_lock */
-	seqcount_t d_seq;		/* per dentry seqlock */
+	unsigned int d_count;		/* (Ref lookup) protected by d_lock */
+	seqlock_t d_lock;		/* per dentry seqlock */
 	struct hlist_bl_node d_hash;	/* lookup hash list */
 	struct dentry *d_parent;	/* parent directory */
 	struct qstr d_name;
@@ -97,8 +98,6 @@ struct dentry {
 	unsigned char d_iname[DNAME_INLINE_LEN];	/* small names */
 
 	/* Ref lookup also touches following */
-	unsigned int d_count;		/* protected by d_lock */
-	spinlock_t d_lock;		/* per dentry lock */
 	const struct dentry_operations *d_op;
 	struct super_block *d_sb;	/* The root of the dentry tree */
 	unsigned long d_time;		/* used by d_revalidate */
@@ -298,8 +297,8 @@ static inline int __d_rcu_to_refcount(st
 {
 	int ret = 0;
 
-	assert_spin_locked(&dentry->d_lock);
-	if (!read_seqcount_retry(&dentry->d_seq, seq)) {
+	assert_seq_spin_locked(&dentry->d_lock);
+	if (!read_seqretry(&dentry->d_lock, seq)) {
 		ret = 1;
 		dentry->d_count++;
 	}
@@ -342,9 +341,9 @@ static inline struct dentry *dget_dlock(
 static inline struct dentry *dget(struct dentry *dentry)
 {
 	if (dentry) {
-		spin_lock(&dentry->d_lock);
+		seq_spin_lock(&dentry->d_lock);
 		dget_dlock(dentry);
-		spin_unlock(&dentry->d_lock);
+		seq_spin_unlock(&dentry->d_lock);
 	}
 	return dentry;
 }
@@ -375,9 +374,9 @@ static inline int cant_mount(struct dent
 
 static inline void dont_mount(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	dentry->d_flags |= DCACHE_CANT_MOUNT;
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 extern void dput(struct dentry *);
Index: tip/include/linux/fs.h
===================================================================
--- tip.orig/include/linux/fs.h
+++ tip/include/linux/fs.h
@@ -2569,9 +2569,9 @@ static inline ino_t parent_ino(struct de
 	 * Don't strictly need d_lock here? If the parent ino could change
 	 * then surely we'd have a deeper race in the caller?
 	 */
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	res = dentry->d_parent->d_inode->i_ino;
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 	return res;
 }
 
Index: tip/include/linux/fsnotify_backend.h
===================================================================
--- tip.orig/include/linux/fsnotify_backend.h
+++ tip/include/linux/fsnotify_backend.h
@@ -329,7 +329,7 @@ static inline void __fsnotify_update_dca
 {
 	struct dentry *parent;
 
-	assert_spin_locked(&dentry->d_lock);
+	assert_seq_spin_locked(&dentry->d_lock);
 
 	/*
 	 * Serialisation of setting PARENT_WATCHED on the dentries is provided
@@ -353,9 +353,9 @@ static inline void __fsnotify_d_instanti
 	if (!inode)
 		return;
 
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	__fsnotify_update_dcache_flags(dentry);
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 /* called from fsnotify listeners, such as fanotify or dnotify */
Index: tip/kernel/cgroup.c
===================================================================
--- tip.orig/kernel/cgroup.c
+++ tip/kernel/cgroup.c
@@ -887,29 +887,29 @@ static void cgroup_clear_directory(struc
 	struct list_head *node;
 
 	BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
-	spin_lock(&dentry->d_lock);
+	seq_spin_lock(&dentry->d_lock);
 	node = dentry->d_subdirs.next;
 	while (node != &dentry->d_subdirs) {
 		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
 
-		spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+		seq_spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
 		list_del_init(node);
 		if (d->d_inode) {
 			/* This should never be called on a cgroup
 			 * directory with child cgroups */
 			BUG_ON(d->d_inode->i_mode & S_IFDIR);
 			dget_dlock(d);
-			spin_unlock(&d->d_lock);
-			spin_unlock(&dentry->d_lock);
+			seq_spin_unlock(&d->d_lock);
+			seq_spin_unlock(&dentry->d_lock);
 			d_delete(d);
 			simple_unlink(dentry->d_inode, d);
 			dput(d);
-			spin_lock(&dentry->d_lock);
+			seq_spin_lock(&dentry->d_lock);
 		} else
-			spin_unlock(&d->d_lock);
+			seq_spin_unlock(&d->d_lock);
 		node = dentry->d_subdirs.next;
 	}
-	spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
 }
 
 /*
@@ -922,11 +922,11 @@ static void cgroup_d_remove_dir(struct d
 	cgroup_clear_directory(dentry);
 
 	parent = dentry->d_parent;
-	spin_lock(&parent->d_lock);
-	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+	seq_spin_lock(&parent->d_lock);
+	seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 	list_del_init(&dentry->d_u.d_child);
-	spin_unlock(&dentry->d_lock);
-	spin_unlock(&parent->d_lock);
+	seq_spin_unlock(&dentry->d_lock);
+	seq_spin_unlock(&parent->d_lock);
 	remove_dir(dentry);
 }
 
Index: tip/net/sunrpc/rpc_pipe.c
===================================================================
--- tip.orig/net/sunrpc/rpc_pipe.c
+++ tip/net/sunrpc/rpc_pipe.c
@@ -396,14 +396,14 @@ rpc_info_open(struct inode *inode, struc
 	if (!ret) {
 		struct seq_file *m = file->private_data;
 
-		spin_lock(&file->f_path.dentry->d_lock);
+		seq_spin_lock(&file->f_path.dentry->d_lock);
 		if (!d_unhashed(file->f_path.dentry))
 			clnt = RPC_I(inode)->private;
 		if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
-			spin_unlock(&file->f_path.dentry->d_lock);
+			seq_spin_unlock(&file->f_path.dentry->d_lock);
 			m->private = clnt;
 		} else {
-			spin_unlock(&file->f_path.dentry->d_lock);
+			seq_spin_unlock(&file->f_path.dentry->d_lock);
 			single_release(inode, file);
 			ret = -EINVAL;
 		}
Index: tip/security/selinux/selinuxfs.c
===================================================================
--- tip.orig/security/selinux/selinuxfs.c
+++ tip/security/selinux/selinuxfs.c
@@ -1188,28 +1188,28 @@ static void sel_remove_entries(struct de
 {
 	struct list_head *node;
 
-	spin_lock(&de->d_lock);
+	seq_spin_lock(&de->d_lock);
 	node = de->d_subdirs.next;
 	while (node != &de->d_subdirs) {
 		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
 
-		spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+		seq_spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
 		list_del_init(node);
 
 		if (d->d_inode) {
 			dget_dlock(d);
-			spin_unlock(&de->d_lock);
-			spin_unlock(&d->d_lock);
+			seq_spin_unlock(&de->d_lock);
+			seq_spin_unlock(&d->d_lock);
 			d_delete(d);
 			simple_unlink(de->d_inode, d);
 			dput(d);
-			spin_lock(&de->d_lock);
+			seq_spin_lock(&de->d_lock);
 		} else
-			spin_unlock(&d->d_lock);
+			seq_spin_unlock(&d->d_lock);
 		node = de->d_subdirs.next;
 	}
 
-	spin_unlock(&de->d_lock);
+	seq_spin_unlock(&de->d_lock);
 }
 
 #define BOOL_DIR_NAME "booleans"



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [patch 0/5] seqlock consolidation
  2012-03-15 11:44 [patch 0/5] seqlock consolidation Thomas Gleixner
                   ` (4 preceding siblings ...)
  2012-03-15 11:44 ` [patch 5/5] fs: Use seqlock in struct dentry Thomas Gleixner
@ 2012-03-15 12:21 ` Al Viro
  2012-03-15 12:28   ` Thomas Gleixner
  5 siblings, 1 reply; 17+ messages in thread
From: Al Viro @ 2012-03-15 12:21 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

On Thu, Mar 15, 2012 at 11:44:22AM -0000, Thomas Gleixner wrote:

> Aside of that replacing open coded constructs with proper functions is
> a worthwhile cleanup by itself.

Provided that those are proper primitives to start with...  I don't like
it - most of ->d_lock uses are _not_ related to ->d_seq, to start with
and then we get an interesting mix of functions that do and do not assume
the lock already taken, etc.

Could you describe RT patch problems in more details?  I really don't like
this solution - interface is overcomplicated and doesn't fit well...

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [patch 0/5] seqlock consolidation
  2012-03-15 12:21 ` [patch 0/5] seqlock consolidation Al Viro
@ 2012-03-15 12:28   ` Thomas Gleixner
  2012-03-15 17:43     ` Al Viro
  0 siblings, 1 reply; 17+ messages in thread
From: Thomas Gleixner @ 2012-03-15 12:28 UTC (permalink / raw)
  To: Al Viro; +Cc: LKML, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

On Thu, 15 Mar 2012, Al Viro wrote:

> On Thu, Mar 15, 2012 at 11:44:22AM -0000, Thomas Gleixner wrote:
> 
> > Aside of that replacing open coded constructs with proper functions is
> > a worthwhile cleanup by itself.
> 
> Provided that those are proper primitives to start with...  I don't like
> it - most of ->d_lock uses are _not_ related to ->d_seq, to start with
> and then we get an interesting mix of functions that do and do not assume
> the lock already taken, etc.
> 
> Could you describe RT patch problems in more details?  I really don't like
> this solution - interface is overcomplicated and doesn't fit well...

On RT the spinlock sections (except the raw_spinlock ones) are
becoming preemptible as we replace the spinlocks by "sleeping
spinlock", a PI aware rtmutex.

So now assume the following:

 spin_lock(d->d_lock);
 ....
 d->d_seq++;

 -> Preemption
    read_seqcount_begin()

     repeat:
        ret = ACCESS_ONCE(sl->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        smp_rmb();

Now when the preempting task has a higher priority, the thing loops
forever. Fail ....

The only way out is to lock d->d_lock which is contended, so the
reader side boosts the writer and waits for the lock to be
released. Though with the open coded seqlock we have no idea which
lock we need to take.

Any better ideas ?

Thanks,

	tglx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [patch 1/5] seqlock: Remove unused functions
  2012-03-15 11:44 ` [patch 1/5] seqlock: Remove unused functions Thomas Gleixner
@ 2012-03-15 16:29   ` Linus Torvalds
  2012-03-15 17:39     ` Al Viro
  0 siblings, 1 reply; 17+ messages in thread
From: Linus Torvalds @ 2012-03-15 16:29 UTC (permalink / raw)
  To: Thomas Gleixner; +Cc: LKML, Al Viro, Ingo Molnar, Peter Zijlstra, Nick Piggin

So I have to say, I hate this entire series.

Seriously, what the heck is the point of this churn? It's all entirely
pointless search-and-replace as far as I can tell, with absolutely zero
upside.

It makes the low-level filesystems have to be aware of things that
they don't want to know and *shouldn't* know. Why should a filesystem
care that d_lock is a seqlock, and have to use a locking function that
they've never seen before and is very specialized?

The "seq" part of the dentry is something only the lookup code and the
internal dentry code should care about. NOBODY ELSE should ever care.

Also, we have actually tried to largely split the D$ lines, so the
d_seq field isn't even necessarily in the same cacheline as the d_lock
part. Very much on purpose: the beginning of the 'struct dentry' is
largely read-only for the lookup part, and can (hopefully) actually be
shared across CPU's for hot directory entries.

Sure, we may have screwed that up, and maybe it turns out that we
write to it too much, but it really was the *intention*. And you
fundamentally and totally screwed that up, and put the largely
read-only sequence count next to the d_lock thing.

So quite frankly, I think the whole series is total and utter garbage.
And there isn't even any *explanation* for the garbage. You say that
you are unifying things, but dammit, in order to unify them you end up
*adding*new*f&^#ing*code*. You add all those "seq_spin_trylock()" etc
counters that really shouldn't be added because nobody needs them, but
you have to add them because you turned what was a perfectly good
spinlock into a seq_spinlock.

I didn't do a full line count, but I think you added more lines than
you removed. The *only* actual removal of code was the few little "use
a seq_spin_init()" instead of initializing the sequence count and
spinlocks separately. Everything else was just search-and-replace with
less common functions.  And addition of those special function code.

Maybe there is some huge advantage that I'm missing - like the fact
that you could optimize the code using some very special new hardware
transactional memory trick that you have pre-production hardware for
now. But dammit, if that is the case, you should have written that out
in some big letters and explained exactly why you are sending out this
series that seems to be a lot of stupid code churn and that actually
makes the code noticeably worse, bigger, and less flexible.

So a honking big NAK on this whole series unless you can explain with
numbers and show with numbers what the advantage of the abortion is.

                               Linus

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [patch 1/5] seqlock: Remove unused functions
  2012-03-15 16:29   ` Linus Torvalds
@ 2012-03-15 17:39     ` Al Viro
  2012-03-15 17:53       ` Thomas Gleixner
  0 siblings, 1 reply; 17+ messages in thread
From: Al Viro @ 2012-03-15 17:39 UTC (permalink / raw)
  To: Linus Torvalds
  Cc: Thomas Gleixner, LKML, Ingo Molnar, Peter Zijlstra, Nick Piggin

On Thu, Mar 15, 2012 at 09:29:50AM -0700, Linus Torvalds wrote:
> So I have to say, I hate this entire series.
> 
> Seriously, what the heck is the point of this churn? It's all entirely
> pointless search-and-replace as far as I can tell, with absolutely zero
> upside.
> 
> It makes the low-level filesystems have to be aware of things that
> they don't want to know and *shouldn't* know. Why should a filesystem
> care that d_lock is a seqlock, and have to use a locking function that
> they've never seen before and is very specialized?
> 
> The "seq" part of the dentry is something only the lookup code and the
> internal dentry code should care about. NOBODY ELSE should ever care.

*nod*

There's another issue I have with that on API level, leaving aside any
questions of that being a good fit for dcache.  It's simply a bad interface:
we have variants that lock and play with d_seq, variants that play with
d_seq alone and, most commonly used, variant that locks but does not
touch d_seq at all.  IOW, we have traded "writes to d_seq must be under
d_lock" with "update-seq-without-locking primitive must be used after we'd
used lock-without-touching-seq one".  Which is not an improvement at all.
Sure, you can make a direct product out of anything; that doesn't make
the result a natural object.

The _only_ relationship between d_seq and d_lock is that the latter happens
to be serializing updates of the former.  For RT there's another one -
->d_lock taken to protect ->d_seq modifications really should not be
preempted in favour of anything that might do read_seqcount_begin on
->d_seq.  The biggest such section is in __d_move(), AFAICS, and it's not
_that_ big; can't RT simply have them protected by whatever it has that
really prevents preempt?

IOW, instead of all that stuff, how about
	about_to_modify_seq_holding_lock(&dentry->d_seq, &dentry->d_lock);
	done_modifying_seq(&dentry->d_seq, &dentry->d_lock);
around those 3 or 4 areas in fs/dcache.c, to give RT the missing information?

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [patch 0/5] seqlock consolidation
  2012-03-15 12:28   ` Thomas Gleixner
@ 2012-03-15 17:43     ` Al Viro
  2012-03-15 17:55       ` Thomas Gleixner
  0 siblings, 1 reply; 17+ messages in thread
From: Al Viro @ 2012-03-15 17:43 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

On Thu, Mar 15, 2012 at 01:28:03PM +0100, Thomas Gleixner wrote:

> The only way out is to lock d->d_lock which is contended, so the
> reader side boosts the writer and waits for the lock to be
> released. Though with the open coded seqlock we have no idea which
> lock we need to take.
> 
> Any better ideas ?

So basically you want RT stuff to know that several areas in fs/dcache.c
(from write_seqcount_begin() to write_seqcount_end()) are protected by
(already held by that point) ->d_lock of corresponding dentries?

If that's it, I suggest to look for a solution that would express just that...
Or do you want something on the reader side as well?

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [patch 1/5] seqlock: Remove unused functions
  2012-03-15 17:39     ` Al Viro
@ 2012-03-15 17:53       ` Thomas Gleixner
  0 siblings, 0 replies; 17+ messages in thread
From: Thomas Gleixner @ 2012-03-15 17:53 UTC (permalink / raw)
  To: Al Viro; +Cc: Linus Torvalds, LKML, Ingo Molnar, Peter Zijlstra, Nick Piggin

On Thu, 15 Mar 2012, Al Viro wrote:
> On Thu, Mar 15, 2012 at 09:29:50AM -0700, Linus Torvalds wrote:
> > So I have to say, I hate this entire series.
> > 
> > Seriously, what the heck is the point of this churn? It's all entirely
> > pointless search-and-replace as far as I can tell, with absolutely zero
> > upside.
> > 
> > It makes the low-level filesystems have to be aware of things that
> > they don't want to know and *shouldn't* know. Why should a filesystem
> > care that d_lock is a seqlock, and have to use a locking function that
> > they've never seen before and is very specialized?
> > 
> > The "seq" part of the dentry is something only the lookup code and the
> > internal dentry code should care about. NOBODY ELSE should ever care.
> 
> *nod*
> 
> There's another issue I have with that on API level, leaving aside any
> questions of that being a good fit for dcache.  It's simply a bad interface:
> we have variants that lock and play with d_seq, variants that play with
> d_seq alone and, most commonly used, variant that locks but does not
> touch d_seq at all.  IOW, we have traded "writes to d_seq must be under
> d_lock" with "update-seq-without-locking primitive must be used after we'd
> used lock-without-touching-seq one".  Which is not an improvement at all.
> Sure, you can make a direct product out of anything; that doesn't make
> the result a natural object.
> 
> The _only_ relationship between d_seq and d_lock is that the latter happens
> to be serializing updates of the former.  For RT there's another one -
> ->d_lock taken to protect ->d_seq modifications really should not be
> preempted in favour of anything that might do read_seqcount_begin on
> ->d_seq.  The biggest such section is in __d_move(), AFAICS, and it's not
> _that_ big; can't RT simply have them protected by whatever it has that
> really prevents preempt?
> 
> IOW, instead of all that stuff, how about
> 	about_to_modify_seq_holding_lock(&dentry->d_seq, &dentry->d_lock);
> 	done_modifying_seq(&dentry->d_seq, &dentry->d_lock);
> around those 3 or 4 areas in fs/dcache.c, to give RT the missing information?

Fair enough. I thought about that earlier, but I was looking for a
solution which does not require adding extra code to every place
where the sequence count is updated. I accept that I went overboard
with that approach.

I just came up with another idea, which restricts the lock annotation to
the init function.

struct seqcount {
       unsigned int seq;
#ifdef CONFIG_LOCKDEP       
       spinlock_t *lock;
#endif
};

	seqcount_init(&seq, &protecting_lock);

That way we could do the lock held assertions in the write_seqcount
functions when LOCKDEP is enabled instead of having them in the code
which uses the write_seqcount functions.

RT could enable that as well and use it for its own purposes. Would
something like that work for you?

Thanks,

	tglx







^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [patch 0/5] seqlock consolidation
  2012-03-15 17:43     ` Al Viro
@ 2012-03-15 17:55       ` Thomas Gleixner
  2012-03-15 18:39         ` Al Viro
  0 siblings, 1 reply; 17+ messages in thread
From: Thomas Gleixner @ 2012-03-15 17:55 UTC (permalink / raw)
  To: Al Viro; +Cc: LKML, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

On Thu, 15 Mar 2012, Al Viro wrote:

> On Thu, Mar 15, 2012 at 01:28:03PM +0100, Thomas Gleixner wrote:
> 
> > The only way out is to lock d->d_lock which is contended, so the
> > reader side boosts the writer and waits for the lock to be
> > released. Though with the open coded seqlock we have no idea which
> > lock we need to take.
> > 
> > Any better ideas ?
> 
> So basically you want RT stuff to know that several areas in fs/dcache.c
> (from write_seqcount_begin() to write_seqcount_end()) are protected by
> (already held by that point) ->d_lock of corresponding dentries?
> 
> If that's it, I suggest to look for a solution that would express just that...
> Or do you want something on the reader side as well?

The problem is the reader side. If the reader preempts the writer then
the only way to make progress is to take the lock, but therefore I need
to know which lock I should take.

Thanks,

	tglx


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [patch 0/5] seqlock consolidation
  2012-03-15 17:55       ` Thomas Gleixner
@ 2012-03-15 18:39         ` Al Viro
  2012-03-15 19:17           ` Thomas Gleixner
  0 siblings, 1 reply; 17+ messages in thread
From: Al Viro @ 2012-03-15 18:39 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

On Thu, Mar 15, 2012 at 06:55:18PM +0100, Thomas Gleixner wrote:
> > If that's it, I suggest to look for a solution that would express just that...
> > Or do you want something on the reader side as well?
> 
> The problem is the reader side. If the reader preempts the writer then
> the only way to make progress is to take the lock, but therefore I need
> to know which lock I should take.

So just make writers non-preemptable in those sections.  Really, the
worst non-deterministic behaviour you get for d_seq ones is memcpy()
of up to ->d_name.len bytes.  And on the fs_struct side it's trivial
to reduce the work done in those sections to several comparisons and
assignments.  Not even path_get_longterm() needs to be there - see
below for how it can be done:

diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index 78b519c..f5818c4 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -26,11 +26,11 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
 {
 	struct path old_root;
 
+	path_get_longterm(path);
 	spin_lock(&fs->lock);
 	write_seqcount_begin(&fs->seq);
 	old_root = fs->root;
 	fs->root = *path;
-	path_get_longterm(path);
 	write_seqcount_end(&fs->seq);
 	spin_unlock(&fs->lock);
 	if (old_root.dentry)
@@ -45,11 +45,11 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
 {
 	struct path old_pwd;
 
+	path_get_longterm(path);
 	spin_lock(&fs->lock);
 	write_seqcount_begin(&fs->seq);
 	old_pwd = fs->pwd;
 	fs->pwd = *path;
-	path_get_longterm(path);
 	write_seqcount_end(&fs->seq);
 	spin_unlock(&fs->lock);
 
@@ -57,6 +57,14 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
 		path_put_longterm(&old_pwd);
 }
 
+static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
+{
+	if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
+		return 0;
+	*p = *new;
+	return 1;
+}
+
 void chroot_fs_refs(struct path *old_root, struct path *new_root)
 {
 	struct task_struct *g, *p;
@@ -68,21 +76,16 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
 		task_lock(p);
 		fs = p->fs;
 		if (fs) {
+			int hits = 0;
 			spin_lock(&fs->lock);
 			write_seqcount_begin(&fs->seq);
-			if (fs->root.dentry == old_root->dentry
-			    && fs->root.mnt == old_root->mnt) {
-				path_get_longterm(new_root);
-				fs->root = *new_root;
+			hits += replace_path(&fs->root, old_root, new_root);
+			hits += replace_path(&fs->pwd, old_root, new_root);
+			write_seqcount_end(&fs->seq);
+			while (hits--) {
 				count++;
-			}
-			if (fs->pwd.dentry == old_root->dentry
-			    && fs->pwd.mnt == old_root->mnt) {
 				path_get_longterm(new_root);
-				fs->pwd = *new_root;
-				count++;
 			}
-			write_seqcount_end(&fs->seq);
 			spin_unlock(&fs->lock);
 		}
 		task_unlock(p);

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: [patch 0/5] seqlock consolidation
  2012-03-15 18:39         ` Al Viro
@ 2012-03-15 19:17           ` Thomas Gleixner
  2012-03-15 20:42             ` Al Viro
  0 siblings, 1 reply; 17+ messages in thread
From: Thomas Gleixner @ 2012-03-15 19:17 UTC (permalink / raw)
  To: Al Viro; +Cc: LKML, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

On Thu, 15 Mar 2012, Al Viro wrote:

> On Thu, Mar 15, 2012 at 06:55:18PM +0100, Thomas Gleixner wrote:
> > > If that's it, I suggest to look for a solution that would express just that...
> > > Or do you want something on the reader side as well?
> > 
> > The problem is the reader side. If the reader preempts the writer then
> > the only way to make progress is to take the lock, but therefor I need
> > to know which lock I should take.
> 
> So just make writers non-preemptable in those sections.  Really, the
> worst non-deterministic behaviour you get for d_seq ones is memcpy()
> of up to ->d_name.len bytes.  And on the fs_struct side it's trivial
> to reduce the work done in those sections to several comparisons and
> assignments.  Not even path_get_longterm() needs to be there - see
> below for how it can be done:

Yeah, path_get_longterm() was what worried me due to dget() taking
d_lock, but yeah, I'm happy to avoid all that churn that way.

Thanks a lot!
 
>  		if (fs) {
> +			int hits = 0;
>  			spin_lock(&fs->lock);
>  			write_seqcount_begin(&fs->seq);
> -			if (fs->root.dentry == old_root->dentry
> -			    && fs->root.mnt == old_root->mnt) {
> -				path_get_longterm(new_root);
> -				fs->root = *new_root;
> +			hits += replace_path(&fs->root, old_root, new_root);
> +			hits += replace_path(&fs->pwd, old_root, new_root);

Wouldn't it be simpler to just do: 

+			count += replace_path(&fs->root, old_root, new_root);
+			count += replace_path(&fs->pwd, old_root, new_root);

> +			write_seqcount_end(&fs->seq);
> +			while (hits--) {
>  				count++;

Instead of that loop ?

Thanks,

	tglx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [patch 0/5] seqlock consolidation
  2012-03-15 19:17           ` Thomas Gleixner
@ 2012-03-15 20:42             ` Al Viro
  2012-03-15 22:08               ` Thomas Gleixner
  0 siblings, 1 reply; 17+ messages in thread
From: Al Viro @ 2012-03-15 20:42 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin

On Thu, Mar 15, 2012 at 08:17:12PM +0100, Thomas Gleixner wrote:
> > +			hits += replace_path(&fs->root, old_root, new_root);
> > +			hits += replace_path(&fs->pwd, old_root, new_root);
> 
> Wouldn't it be simpler to just do: 
> 
> +			count += replace_path(&fs->root, old_root, new_root);
> +			count += replace_path(&fs->pwd, old_root, new_root);
> 
> > +			write_seqcount_end(&fs->seq);
> > +			while (hits--) {
> >  				count++;
> 
> Instead of that loop ?

This loop also contains path_get_longterm() and we need to do it before
dropping fs->lock.  We are holding a reference to new_root, all right,
but once it's placed into ->fs->{root,pwd} of another task and ->fs->lock
is dropped, there's nothing to stop that task from doing chdir() and dropping
its reference.  Which could outweigh the single reference we are holding
pretty soon...

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [patch 0/5] seqlock consolidation
  2012-03-15 20:42             ` Al Viro
@ 2012-03-15 22:08               ` Thomas Gleixner
  0 siblings, 0 replies; 17+ messages in thread
From: Thomas Gleixner @ 2012-03-15 22:08 UTC (permalink / raw)
  To: Al Viro; +Cc: LKML, Linus Torvalds, Ingo Molnar, Peter Zijlstra, Nick Piggin



On Thu, 15 Mar 2012, Al Viro wrote:

> On Thu, Mar 15, 2012 at 08:17:12PM +0100, Thomas Gleixner wrote:
> > > +			hits += replace_path(&fs->root, old_root, new_root);
> > > +			hits += replace_path(&fs->pwd, old_root, new_root);
> > 
> > Wouldn't it be simpler to just do: 
> > 
> > +			count += replace_path(&fs->root, old_root, new_root);
> > +			count += replace_path(&fs->pwd, old_root, new_root);
> > 
> > > +			write_seqcount_end(&fs->seq);
> > > +			while (hits--) {
> > >  				count++;
> > 
> > Instead of that loop ?
> 
> This loop also contains path_get_longterm() and we need to do it before
> dropping fs->lock.  We are holding a reference to new_root, all right,
> but once it's placed into ->fs->{root,pwd} of another task and ->fs->lock
> is dropped, there's nothing to stop that task from doing chdir() and dropping
> its reference.  Which could outweigh the single reference we are holding
> pretty soon...

Oops, sorry I missed that path_get_longterm() in the loop due to the
+/- forest around it.


^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2012-03-15 22:08 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-03-15 11:44 [patch 0/5] seqlock consolidation Thomas Gleixner
2012-03-15 11:44 ` [patch 1/5] seqlock: Remove unused functions Thomas Gleixner
2012-03-15 16:29   ` Linus Torvalds
2012-03-15 17:39     ` Al Viro
2012-03-15 17:53       ` Thomas Gleixner
2012-03-15 11:44 ` [patch 3/5] seqlock: Provide seq_spin_* functions Thomas Gleixner
2012-03-15 11:44 ` [patch 2/5] seqlock: Use seqcount for seqlock Thomas Gleixner
2012-03-15 11:44 ` [patch 4/5] fs: fs_struct use seqlock Thomas Gleixner
2012-03-15 11:44 ` [patch 5/5] fs: Use seqlock in struct dentry Thomas Gleixner
2012-03-15 12:21 ` [patch 0/5] seqlock consolidation Al Viro
2012-03-15 12:28   ` Thomas Gleixner
2012-03-15 17:43     ` Al Viro
2012-03-15 17:55       ` Thomas Gleixner
2012-03-15 18:39         ` Al Viro
2012-03-15 19:17           ` Thomas Gleixner
2012-03-15 20:42             ` Al Viro
2012-03-15 22:08               ` Thomas Gleixner

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox