linux-fsdevel.vger.kernel.org archive mirror
* [patch 2/5] vfs: d_genocide() doesnt add dentries to unused list
  2006-06-01  9:51 [patch 0/5] [PATCH,RFC] vfs: per-superblock unused dentries list (2nd version) jblunck
@ 2006-06-01  9:51 ` jblunck
  0 siblings, 0 replies; 10+ messages in thread
From: jblunck @ 2006-06-01  9:51 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-fsdevel, akpm, viro, dgc, balbir

[-- Attachment #1: patches.jbl/vfs-d_genocide-fix.diff --]
[-- Type: text/plain, Size: 1189 bytes --]

Calling d_genocide() only lowers the reference count of the dentries but
doesn't add them to the unused list.

Signed-off-by: Jan Blunck <jblunck@suse.de>
---
 fs/dcache.c |   18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

Index: work-2.6/fs/dcache.c
===================================================================
--- work-2.6.orig/fs/dcache.c
+++ work-2.6/fs/dcache.c
@@ -1612,11 +1612,25 @@ resume:
 			this_parent = dentry;
 			goto repeat;
 		}
-		atomic_dec(&dentry->d_count);
+		if (!list_empty(&dentry->d_lru)) {
+			dentry_stat.nr_unused--;
+			list_del_init(&dentry->d_lru);
+		}
+		if (atomic_dec_and_test(&dentry->d_count)) {
+			list_add(&dentry->d_lru, dentry_unused.prev);
+			dentry_stat.nr_unused++;
+		}
 	}
 	if (this_parent != root) {
 		next = this_parent->d_u.d_child.next;
-		atomic_dec(&this_parent->d_count);
+		if (!list_empty(&this_parent->d_lru)) {
+			dentry_stat.nr_unused--;
+			list_del_init(&this_parent->d_lru);
+		}
+		if (atomic_dec_and_test(&this_parent->d_count)) {
+			list_add(&this_parent->d_lru, dentry_unused.prev);
+			dentry_stat.nr_unused++;
+		}
 		this_parent = this_parent->d_parent;
 		goto resume;
 	}


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH 0/5] vfs: per-superblock unused dentries list (3rd version)
@ 2006-06-16 10:43 jblunck
  2006-06-16 10:43 ` [PATCH 1/5] vfs: remove whitespace noise from fs/dcache.c jblunck
                   ` (4 more replies)
  0 siblings, 5 replies; 10+ messages in thread
From: jblunck @ 2006-06-16 10:43 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-fsdevel, akpm, viro, dgc, balbir, neilb

This patch series is an updated version of my original patches. This time the
changes Andrew proposed are addressed. David, can you test them with respect to
the scan_count stuff?

This is an attempt to have per-superblock unused dentry lists. Since dentries
are lazily removed from the unused list, one big list doesn't scale well on
systems with a huge dentry cache: the dcache shrinkers spend a long time
traversing the list under the dcache spinlock.

The patches introduce an additional list_head per superblock that holds only
that superblock's unused dentries. The next dentry to prune can be found
quickly, so the shrinkers don't need to hold the dcache lock for long.
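
In other words (a simplified sketch of the idea; the field name follows
patch 5/5, the real code differs in detail):

	/* every superblock carries its own LRU of unused dentries ... */
	struct super_block {
		...
		struct list_head	s_dentry_unused;
	};

	/* ... so a shrinker only ever walks one superblock's dentries */
	list_for_each_entry_safe(dentry, next, &sb->s_dentry_unused, d_lru) {
		/* everything on this list belongs to sb, no d_sb check needed */
		...
	}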

One nice side effect: the "busy inodes after unmount" race is fixed because
prune_dcache() now takes the s_umount lock before it starts working on a
superblock's dentries.
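
The locking meant here is the superblock walk that the reworked prune_dcache()
does in patch 5/5, roughly (sb_lock/s_count handling omitted):

	list_for_each_entry(sb, &super_blocks, s_list) {
		if (down_read_trylock(&sb->s_umount)) {
			if (sb->s_root)
				prune_dcache_sb(sb, count);
			up_read(&sb->s_umount);
		}
	}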

These patches are against torvalds/linux-2.6.git from today.

Comments?
      Jan



^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH 1/5] vfs: remove whitespace noise from fs/dcache.c
  2006-06-16 10:43 [PATCH 0/5] vfs: per-superblock unused dentries list (3rd version) jblunck
@ 2006-06-16 10:43 ` jblunck
  2006-06-16 10:43 ` [PATCH 2/5] vfs: d_genocide() doesnt add dentries to unused list jblunck
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 10+ messages in thread
From: jblunck @ 2006-06-16 10:43 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-fsdevel, akpm, viro, dgc, balbir, neilb

[-- Attachment #1: patches.jbl/vfs-whitespace-cleanup.diff --]
[-- Type: text/plain, Size: 11851 bytes --]

Remove some whitespace noise from fs/dcache.c.

Signed-off-by: Jan Blunck <jblunck@suse.de>
---
 fs/dcache.c |  128 ++++++++++++++++++++++++++++++------------------------------
 1 file changed, 64 insertions(+), 64 deletions(-)

Index: work-2.6/fs/dcache.c
===================================================================
--- work-2.6.orig/fs/dcache.c
+++ work-2.6/fs/dcache.c
@@ -74,7 +74,7 @@ static void d_callback(struct rcu_head *
 
 	if (dname_external(dentry))
 		kfree(dentry->d_name.name);
-	kmem_cache_free(dentry_cache, dentry); 
+	kmem_cache_free(dentry_cache, dentry);
 }
 
 /*
@@ -85,7 +85,7 @@ static void d_free(struct dentry *dentry
 {
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
- 	call_rcu(&dentry->d_u.d_rcu, d_callback);
+	call_rcu(&dentry->d_u.d_rcu, d_callback);
 }
 
 /*
@@ -113,7 +113,7 @@ static void dentry_iput(struct dentry * 
 	}
 }
 
-/* 
+/*
  * This is dput
  *
  * This is complicated by the fact that we do not want to put
@@ -132,7 +132,7 @@ static void dentry_iput(struct dentry * 
 
 /*
  * dput - release a dentry
- * @dentry: dentry to release 
+ * @dentry: dentry to release
  *
  * Release a dentry. This will drop the usage count and if appropriate
  * call the dentry unlink method as well as removing it from the queues and
@@ -168,14 +168,14 @@ repeat:
 			goto unhash_it;
 	}
 	/* Unreachable? Get rid of it */
- 	if (d_unhashed(dentry))
+	if (d_unhashed(dentry))
 		goto kill_it;
-  	if (list_empty(&dentry->d_lru)) {
-  		dentry->d_flags |= DCACHE_REFERENCED;
-  		list_add(&dentry->d_lru, &dentry_unused);
-  		dentry_stat.nr_unused++;
-  	}
- 	spin_unlock(&dentry->d_lock);
+	if (list_empty(&dentry->d_lru)) {
+		dentry->d_flags |= DCACHE_REFERENCED;
+		list_add(&dentry->d_lru, &dentry_unused);
+		dentry_stat.nr_unused++;
+	}
+	spin_unlock(&dentry->d_lock);
 	spin_unlock(&dcache_lock);
 	return;
 
@@ -188,11 +188,11 @@ kill_it: {
 		/* If dentry was on d_lru list
 		 * delete it from there
 		 */
-  		if (!list_empty(&dentry->d_lru)) {
-  			list_del(&dentry->d_lru);
-  			dentry_stat.nr_unused--;
-  		}
-  		list_del(&dentry->d_u.d_child);
+		if (!list_empty(&dentry->d_lru)) {
+			list_del(&dentry->d_lru);
+			dentry_stat.nr_unused--;
+		}
+		list_del(&dentry->d_u.d_child);
 		dentry_stat.nr_dentry--;	/* For d_free, below */
 		/*drops the locks, at that point nobody can reach this dentry */
 		dentry_iput(dentry);
@@ -216,7 +216,7 @@ kill_it: {
  *
  * no dcache lock.
  */
- 
+
 int d_invalidate(struct dentry * dentry)
 {
 	/*
@@ -308,7 +308,7 @@ static struct dentry * __d_find_alias(st
 		next = tmp->next;
 		prefetch(next);
 		alias = list_entry(tmp, struct dentry, d_alias);
- 		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
+		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 			if (alias->d_flags & DCACHE_DISCONNECTED)
 				discon_alias = alias;
 			else if (!want_discon) {
@@ -391,7 +391,7 @@ static inline void prune_one_dentry(stru
  * This function may fail to free any resources if
  * all the dentries are in use.
  */
- 
+
 static void prune_dcache(int count)
 {
 	spin_lock(&dcache_lock);
@@ -406,25 +406,25 @@ static void prune_dcache(int count)
 			break;
 		list_del_init(tmp);
 		prefetch(dentry_unused.prev);
- 		dentry_stat.nr_unused--;
+		dentry_stat.nr_unused--;
 		dentry = list_entry(tmp, struct dentry, d_lru);
 
- 		spin_lock(&dentry->d_lock);
+		spin_lock(&dentry->d_lock);
 		/*
 		 * We found an inuse dentry which was not removed from
 		 * dentry_unused because of laziness during lookup.  Do not free
 		 * it - just keep it off the dentry_unused list.
 		 */
- 		if (atomic_read(&dentry->d_count)) {
- 			spin_unlock(&dentry->d_lock);
+		if (atomic_read(&dentry->d_count)) {
+			spin_unlock(&dentry->d_lock);
 			continue;
 		}
 		/* If the dentry was recently referenced, don't free it. */
 		if (dentry->d_flags & DCACHE_REFERENCED) {
 			dentry->d_flags &= ~DCACHE_REFERENCED;
- 			list_add(&dentry->d_lru, &dentry_unused);
- 			dentry_stat.nr_unused++;
- 			spin_unlock(&dentry->d_lock);
+			list_add(&dentry->d_lru, &dentry_unused);
+			dentry_stat.nr_unused++;
+			spin_unlock(&dentry->d_lock);
 			continue;
 		}
 		prune_one_dentry(dentry);
@@ -499,7 +499,7 @@ repeat:
  * We descend to the next level whenever the d_subdirs
  * list is non-empty and continue searching.
  */
- 
+
 /**
  * have_submounts - check for mounts over a dentry
  * @parent: dentry to check.
@@ -507,7 +507,7 @@ repeat:
  * Return true if the parent or its subdirectories contain
  * a mount point
  */
- 
+
 int have_submounts(struct dentry *parent)
 {
 	struct dentry *this_parent = parent;
@@ -579,8 +579,8 @@ resume:
 			dentry_stat.nr_unused--;
 			list_del_init(&dentry->d_lru);
 		}
-		/* 
-		 * move only zero ref count dentries to the end 
+		/*
+		 * move only zero ref count dentries to the end
 		 * of the unused list for prune_dcache
 		 */
 		if (!atomic_read(&dentry->d_count)) {
@@ -624,7 +624,7 @@ out:
  *
  * Prune the dcache to remove unused children of the parent dentry.
  */
- 
+
 void shrink_dcache_parent(struct dentry * parent)
 {
 	int found;
@@ -657,8 +657,8 @@ void shrink_dcache_anon(struct hlist_hea
 				list_del_init(&this->d_lru);
 			}
 
-			/* 
-			 * move only zero ref count dentries to the end 
+			/*
+			 * move only zero ref count dentries to the end
 			 * of the unused list for prune_dcache
 			 */
 			if (!atomic_read(&this->d_count)) {
@@ -703,25 +703,25 @@ static int shrink_dcache_memory(int nr, 
  * available. On a success the dentry is returned. The name passed in is
  * copied and the copy passed in may be reused after this call.
  */
- 
+
 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 {
 	struct dentry *dentry;
 	char *dname;
 
-	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); 
+	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
 	if (!dentry)
 		return NULL;
 
 	if (name->len > DNAME_INLINE_LEN-1) {
 		dname = kmalloc(name->len + 1, GFP_KERNEL);
 		if (!dname) {
-			kmem_cache_free(dentry_cache, dentry); 
+			kmem_cache_free(dentry_cache, dentry);
 			return NULL;
 		}
 	} else  {
 		dname = dentry->d_iname;
-	}	
+	}
 	dentry->d_name.name = dname;
 
 	dentry->d_name.len = name->len;
@@ -786,7 +786,7 @@ struct dentry *d_alloc_name(struct dentr
  * (or otherwise set) by the caller to indicate that it is now
  * in use by the dcache.
  */
- 
+
 void d_instantiate(struct dentry *entry, struct inode * inode)
 {
 	BUG_ON(!list_empty(&entry->d_alias));
@@ -861,7 +861,7 @@ EXPORT_SYMBOL(d_instantiate_unique);
  * instantiated and returned. %NULL is returned if there is insufficient
  * memory or the inode passed is %NULL.
  */
- 
+
 struct dentry * d_alloc_root(struct inode * root_inode)
 {
 	struct dentry *res = NULL;
@@ -892,7 +892,7 @@ static inline struct hlist_head *d_hash(
  * @inode: inode to allocate the dentry for
  *
  * This is similar to d_alloc_root.  It is used by filesystems when
- * creating a dentry for a given inode, often in the process of 
+ * creating a dentry for a given inode, often in the process of
  * mapping a filehandle to a dentry.  The returned dentry may be
  * anonymous, or may have a full name (if the inode was already
  * in the cache).  The file system may need to make further
@@ -923,7 +923,7 @@ struct dentry * d_alloc_anon(struct inod
 		return NULL;
 
 	tmp->d_parent = tmp; /* make sure dput doesn't croak */
-	
+
 	spin_lock(&dcache_lock);
 	res = __d_find_alias(inode, 0);
 	if (!res) {
@@ -1009,7 +1009,7 @@ struct dentry *d_splice_alias(struct ino
  * finished using it. %NULL is returned on failure.
  *
  * __d_lookup is dcache_lock free. The hash list is protected using RCU.
- * Memory barriers are used while updating and doing lockless traversal. 
+ * Memory barriers are used while updating and doing lockless traversal.
  * To avoid races with d_move while rename is happening, d_lock is used.
  *
  * Overflows in memcmp(), while d_move, are avoided by keeping the length
@@ -1032,10 +1032,10 @@ struct dentry * d_lookup(struct dentry *
 	struct dentry * dentry = NULL;
 	unsigned long seq;
 
-        do {
-                seq = read_seqbegin(&rename_lock);
-                dentry = __d_lookup(parent, name);
-                if (dentry)
+	do {
+		seq = read_seqbegin(&rename_lock);
+		dentry = __d_lookup(parent, name);
+		if (dentry)
 			break;
 	} while (read_seqretry(&rename_lock, seq));
 	return dentry;
@@ -1052,7 +1052,7 @@ struct dentry * __d_lookup(struct dentry
 	struct dentry *dentry;
 
 	rcu_read_lock();
-	
+
 	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
 		struct qstr *qstr;
 
@@ -1094,10 +1094,10 @@ struct dentry * __d_lookup(struct dentry
 		break;
 next:
 		spin_unlock(&dentry->d_lock);
- 	}
- 	rcu_read_unlock();
+	}
+	rcu_read_unlock();
 
- 	return found;
+	return found;
 }
 
 /**
@@ -1137,7 +1137,7 @@ out:
  * This is used by ncpfs in its readdir implementation.
  * Zero is returned in the dentry is invalid.
  */
- 
+
 int d_validate(struct dentry *dentry, struct dentry *dparent)
 {
 	struct hlist_head *base;
@@ -1152,7 +1152,7 @@ int d_validate(struct dentry *dentry, st
 
 	spin_lock(&dcache_lock);
 	base = d_hash(dparent, dentry->d_name.hash);
-	hlist_for_each(lhp,base) { 
+	hlist_for_each(lhp,base) {
 		/* hlist_for_each_entry_rcu() not required for d_hash list
 		 * as it is parsed under dcache_lock
 		 */
@@ -1179,7 +1179,7 @@ out:
  * it from the hash queues and waiting for
  * it to be deleted later when it has no users
  */
- 
+
 /**
  * d_delete - delete a dentry
  * @dentry: The dentry to delete
@@ -1187,7 +1187,7 @@ out:
  * Turn the dentry into a negative dentry if possible, otherwise
  * remove it from the hash queues so it can be deleted later
  */
- 
+
 void d_delete(struct dentry * dentry)
 {
 	int isdir = 0;
@@ -1218,8 +1218,8 @@ void d_delete(struct dentry * dentry)
 static void __d_rehash(struct dentry * entry, struct hlist_head *list)
 {
 
- 	entry->d_flags &= ~DCACHE_UNHASHED;
- 	hlist_add_head_rcu(&entry->d_hash, list);
+	entry->d_flags &= ~DCACHE_UNHASHED;
+	hlist_add_head_rcu(&entry->d_hash, list);
 }
 
 /**
@@ -1228,7 +1228,7 @@ static void __d_rehash(struct dentry * e
  *
  * Adds a dentry to the hash according to its name.
  */
- 
+
 void d_rehash(struct dentry * entry)
 {
 	struct hlist_head *list = d_hash(entry->d_parent, entry->d_name.hash);
@@ -1302,7 +1302,7 @@ static void switch_names(struct dentry *
  * up under the name it got deleted rather than the name that
  * deleted it.
  */
- 
+
 /**
  * d_move - move a dentry
  * @dentry: entry to move
@@ -1560,7 +1560,7 @@ out:
  * Returns 0 otherwise.
  * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
  */
-  
+
 int is_subdir(struct dentry * new_dentry, struct dentry * old_dentry)
 {
 	int result;
@@ -1571,7 +1571,7 @@ int is_subdir(struct dentry * new_dentry
 	 * d_move
 	 */
 	rcu_read_lock();
-        do {
+	do {
 		/* for restarting inner loop in case of seq retry */
 		new_dentry = saved;
 		result = 0;
@@ -1636,7 +1636,7 @@ resume:
  * filesystems using synthetic inode numbers, and is necessary
  * to keep getcwd() working.
  */
- 
+
 ino_t find_inode_number(struct dentry *dir, struct qstr *name)
 {
 	struct dentry * dentry;
@@ -1689,10 +1689,10 @@ static void __init dcache_init(unsigned 
 {
 	int loop;
 
-	/* 
+	/*
 	 * A constructor could be added for stable state like the lists,
 	 * but it is probably not worth it because of the cache nature
-	 * of the dcache. 
+	 * of the dcache.
 	 */
 	dentry_cache = kmem_cache_create("dentry_cache",
 					 sizeof(struct dentry),
@@ -1700,7 +1700,7 @@ static void __init dcache_init(unsigned 
 					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
 					 SLAB_MEM_SPREAD),
 					 NULL, NULL);
-	
+
 	set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
 
 	/* Hash may have been set up in dcache_init_early */


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH 2/5] vfs: d_genocide() doesnt add dentries to unused list
  2006-06-16 10:43 [PATCH 0/5] vfs: per-superblock unused dentries list (3rd version) jblunck
  2006-06-16 10:43 ` [PATCH 1/5] vfs: remove whitespace noise from fs/dcache.c jblunck
@ 2006-06-16 10:43 ` jblunck
  2006-06-18 19:34   ` Balbir Singh
  2006-06-16 10:43 ` [PATCH 3/5] vfs: remove shrink_dcache_anon() jblunck
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 10+ messages in thread
From: jblunck @ 2006-06-16 10:43 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-fsdevel, akpm, viro, dgc, balbir, neilb

[-- Attachment #1: patches.jbl/vfs-d_genocide-fix.diff --]
[-- Type: text/plain, Size: 1188 bytes --]

Calling d_genocide() only lowers the reference count of the dentries but
doesn't add them to the unused list.

Signed-off-by: Jan Blunck <jblunck@suse.de>
---
 fs/dcache.c |   18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

Index: work-2.6/fs/dcache.c
===================================================================
--- work-2.6.orig/fs/dcache.c
+++ work-2.6/fs/dcache.c
@@ -1612,11 +1612,25 @@ resume:
 			this_parent = dentry;
 			goto repeat;
 		}
-		atomic_dec(&dentry->d_count);
+		if (!list_empty(&dentry->d_lru)) {
+			dentry_stat.nr_unused--;
+			list_del_init(&dentry->d_lru);
+		}
+		if (atomic_dec_and_test(&dentry->d_count)) {
+			list_add(&dentry->d_lru, dentry_unused.prev);
+			dentry_stat.nr_unused++;
+		}
 	}
 	if (this_parent != root) {
 		next = this_parent->d_u.d_child.next;
-		atomic_dec(&this_parent->d_count);
+		if (!list_empty(&this_parent->d_lru)) {
+			dentry_stat.nr_unused--;
+			list_del_init(&this_parent->d_lru);
+		}
+		if (atomic_dec_and_test(&this_parent->d_count)) {
+			list_add(&this_parent->d_lru, dentry_unused.prev);
+			dentry_stat.nr_unused++;
+		}
 		this_parent = this_parent->d_parent;
 		goto resume;
 	}

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH 3/5] vfs: remove shrink_dcache_anon()
  2006-06-16 10:43 [PATCH 0/5] vfs: per-superblock unused dentries list (3rd version) jblunck
  2006-06-16 10:43 ` [PATCH 1/5] vfs: remove whitespace noise from fs/dcache.c jblunck
  2006-06-16 10:43 ` [PATCH 2/5] vfs: d_genocide() doesnt add dentries to unused list jblunck
@ 2006-06-16 10:43 ` jblunck
  2006-06-16 10:43 ` [PATCH 4/5] vfs: per superblock dentry stats jblunck
  2006-06-16 10:43 ` [PATCH 5/5] vfs: per superblock dentry unused list jblunck
  4 siblings, 0 replies; 10+ messages in thread
From: jblunck @ 2006-06-16 10:43 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-fsdevel, akpm, viro, dgc, balbir, neilb

[-- Attachment #1: patches.jbl/vfs-remove-shrink_dcache_anon.diff --]
[-- Type: text/plain, Size: 5280 bytes --]

After shrink_dcache_anon() was unexported, generic_shutdown_super() is its only
remaining caller. Instead of calling shrink_dcache_anon() and
shrink_dcache_parent() there, it is better to call shrink_dcache_sb(). Therefore
shrink_dcache_sb() is extended to also reorder the anonymous dentries onto the
unused list. This way we get rid of all unused dentries the first time we touch
them, instead of checking for DCACHE_REFERENCED and adding them back to the
list again.
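
Schematically, the reworked shrink_dcache_sb() then looks like this (a
condensed sketch, the complete code is in the diff below):

	void shrink_dcache_sb(struct super_block *sb)
	{
		/*
		 * Pass one: move this superblock's unused dentries,
		 * including the anonymous ones hanging off sb->s_anon,
		 * to the most recent end of the global unused list.
		 */
		select_anon(sb);
		select_sb(sb);

		/*
		 * Pass two: walk the unused list again and free every
		 * zero ref count dentry that belongs to sb (open-coded
		 * in the patch below).
		 */
	}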

Signed-off-by: Jan Blunck <jblunck@suse.de>
---
 fs/dcache.c            |  100 ++++++++++++++++++++++++-------------------------
 fs/super.c             |    3 -
 include/linux/dcache.h |    1 
 3 files changed, 51 insertions(+), 53 deletions(-)

Index: work-2.6/fs/dcache.c
===================================================================
--- work-2.6.orig/fs/dcache.c
+++ work-2.6/fs/dcache.c
@@ -384,9 +384,8 @@ static inline void prune_one_dentry(stru
  * @count: number of entries to try and free
  *
  * Shrink the dcache. This is done when we need
- * more memory, or simply when we need to unmount
- * something (at which point we need to unuse
- * all dentries).
+ * more memory. When we need to unmount something
+ * we call shrink_dcache_sb().
  *
  * This function may fail to free any resources if
  * all the dentries are in use.
@@ -432,6 +431,51 @@ static void prune_dcache(int count)
 	spin_unlock(&dcache_lock);
 }
 
+
+/*
+ * parsing d_hash list does not hlist_for_each_entry_rcu() as it
+ * done under dcache_lock.
+ */
+static void select_anon(struct super_block *sb)
+{
+	struct dentry *dentry;
+	struct hlist_node *lp;
+
+	spin_lock(&dcache_lock);
+	hlist_for_each_entry(dentry, lp, &sb->s_anon, d_hash) {
+		if (!list_empty(&dentry->d_lru)) {
+			dentry_stat.nr_unused--;
+			list_del_init(&dentry->d_lru);
+		}
+
+		/*
+		 * move only zero ref count dentries to the beginning
+		 * (the most recent end) of the unused list
+		 */
+		spin_lock(&dentry->d_lock);
+		if (!atomic_read(&dentry->d_count)) {
+			list_add(&dentry->d_lru, &dentry_unused);
+			dentry_stat.nr_unused++;
+		}
+		spin_unlock(&dentry->d_lock);
+	}
+	spin_unlock(&dcache_lock);
+}
+
+static void select_sb(struct super_block *sb)
+{
+	struct dentry *dentry, *pos;
+
+	spin_lock(&dcache_lock);
+	list_for_each_entry_safe(dentry, pos, &dentry_unused, d_lru) {
+		if (dentry->d_sb != sb)
+			continue;
+		list_del(&dentry->d_lru);
+		list_add(&dentry->d_lru, &dentry_unused);
+	}
+	spin_unlock(&dcache_lock);
+}
+
 /*
  * Shrink the dcache for the specified super block.
  * This allows us to unmount a device without disturbing
@@ -463,18 +507,13 @@ void shrink_dcache_sb(struct super_block
 	 * Pass one ... move the dentries for the specified
 	 * superblock to the most recent end of the unused list.
 	 */
-	spin_lock(&dcache_lock);
-	list_for_each_safe(tmp, next, &dentry_unused) {
-		dentry = list_entry(tmp, struct dentry, d_lru);
-		if (dentry->d_sb != sb)
-			continue;
-		list_del(tmp);
-		list_add(tmp, &dentry_unused);
-	}
+	select_anon(sb);
+	select_sb(sb);
 
 	/*
 	 * Pass two ... free the dentries for this superblock.
 	 */
+	spin_lock(&dcache_lock);
 repeat:
 	list_for_each_safe(tmp, next, &dentry_unused) {
 		dentry = list_entry(tmp, struct dentry, d_lru);
@@ -633,45 +672,6 @@ void shrink_dcache_parent(struct dentry 
 		prune_dcache(found);
 }
 
-/**
- * shrink_dcache_anon - further prune the cache
- * @head: head of d_hash list of dentries to prune
- *
- * Prune the dentries that are anonymous
- *
- * parsing d_hash list does not hlist_for_each_entry_rcu() as it
- * done under dcache_lock.
- *
- */
-void shrink_dcache_anon(struct hlist_head *head)
-{
-	struct hlist_node *lp;
-	int found;
-	do {
-		found = 0;
-		spin_lock(&dcache_lock);
-		hlist_for_each(lp, head) {
-			struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
-			if (!list_empty(&this->d_lru)) {
-				dentry_stat.nr_unused--;
-				list_del_init(&this->d_lru);
-			}
-
-			/*
-			 * move only zero ref count dentries to the end
-			 * of the unused list for prune_dcache
-			 */
-			if (!atomic_read(&this->d_count)) {
-				list_add_tail(&this->d_lru, &dentry_unused);
-				dentry_stat.nr_unused++;
-				found++;
-			}
-		}
-		spin_unlock(&dcache_lock);
-		prune_dcache(found);
-	} while(found);
-}
-
 /*
  * Scan `nr' dentries and return the number which remain.
  *
Index: work-2.6/fs/super.c
===================================================================
--- work-2.6.orig/fs/super.c
+++ work-2.6/fs/super.c
@@ -230,8 +230,7 @@ void generic_shutdown_super(struct super
 
 	if (root) {
 		sb->s_root = NULL;
-		shrink_dcache_parent(root);
-		shrink_dcache_anon(&sb->s_anon);
+		shrink_dcache_sb(sb);
 		dput(root);
 		fsync_super(sb);
 		lock_super(sb);
Index: work-2.6/include/linux/dcache.h
===================================================================
--- work-2.6.orig/include/linux/dcache.h
+++ work-2.6/include/linux/dcache.h
@@ -217,7 +217,6 @@ extern struct dentry * d_alloc_anon(stru
 extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
 extern void shrink_dcache_sb(struct super_block *);
 extern void shrink_dcache_parent(struct dentry *);
-extern void shrink_dcache_anon(struct hlist_head *);
 extern int d_invalidate(struct dentry *);
 
 /* only used at mount-time */


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH 4/5] vfs: per superblock dentry stats
  2006-06-16 10:43 [PATCH 0/5] vfs: per-superblock unused dentries list (3rd version) jblunck
                   ` (2 preceding siblings ...)
  2006-06-16 10:43 ` [PATCH 3/5] vfs: remove shrink_dcache_anon() jblunck
@ 2006-06-16 10:43 ` jblunck
  2006-06-16 10:43 ` [PATCH 5/5] vfs: per superblock dentry unused list jblunck
  4 siblings, 0 replies; 10+ messages in thread
From: jblunck @ 2006-06-16 10:43 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-fsdevel, akpm, viro, dgc, balbir, neilb

[-- Attachment #1: patches.jbl/vfs-per-sb-dentry_stat.diff --]
[-- Type: text/plain, Size: 8278 bytes --]

This patch adds per-superblock dentry statistics: the total number of dentries
and the number of unused dentries for each superblock.
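
Every place that used to touch the global dentry_stat now goes through a pair
of helpers that update both the global and the per-superblock counters, used
roughly like this (the actual definitions are in the diff below):

	/* keep the global and the per-superblock statistics in sync */
	dentry_stat_inc(dentry->d_sb, nr_dentry);	/* a dentry was allocated */
	dentry_stat_dec(dentry->d_sb, nr_unused);	/* a dentry left the unused list */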

Signed-off-by: Jan Blunck <jblunck@suse.de>
---
 fs/dcache.c            |   47 ++++++++++++++++++++++++++---------------------
 fs/super.c             |    1 +
 include/linux/dcache.h |   21 +++++++++++++++++----
 include/linux/fs.h     |    1 +
 kernel/sysctl.c        |    2 +-
 5 files changed, 46 insertions(+), 26 deletions(-)

Index: work-2.6/fs/dcache.c
===================================================================
--- work-2.6.orig/fs/dcache.c
+++ work-2.6/fs/dcache.c
@@ -64,7 +64,7 @@ static struct hlist_head *dentry_hashtab
 static LIST_HEAD(dentry_unused);
 
 /* Statistics gathering. */
-struct dentry_stat_t dentry_stat = {
+struct dentry_stat global_dentry_stat = {
 	.age_limit = 45,
 };
 
@@ -173,7 +173,7 @@ repeat:
 	if (list_empty(&dentry->d_lru)) {
 		dentry->d_flags |= DCACHE_REFERENCED;
 		list_add(&dentry->d_lru, &dentry_unused);
-		dentry_stat.nr_unused++;
+		dentry_stat_inc(dentry->d_sb, nr_unused);
 	}
 	spin_unlock(&dentry->d_lock);
 	spin_unlock(&dcache_lock);
@@ -190,11 +190,13 @@ kill_it: {
 		 */
 		if (!list_empty(&dentry->d_lru)) {
 			list_del(&dentry->d_lru);
-			dentry_stat.nr_unused--;
+			dentry_stat_dec(dentry->d_sb, nr_unused);
 		}
 		list_del(&dentry->d_u.d_child);
-		dentry_stat.nr_dentry--;	/* For d_free, below */
-		/*drops the locks, at that point nobody can reach this dentry */
+		/* For d_free, below */
+		dentry_stat_dec(dentry->d_sb, nr_dentry);
+		/* drops the locks, at that point nobody can reach this
+		 * dentry anymore */
 		dentry_iput(dentry);
 		parent = dentry->d_parent;
 		d_free(dentry);
@@ -268,7 +270,7 @@ static inline struct dentry * __dget_loc
 {
 	atomic_inc(&dentry->d_count);
 	if (!list_empty(&dentry->d_lru)) {
-		dentry_stat.nr_unused--;
+		dentry_stat_dec(dentry->d_sb, nr_unused);
 		list_del_init(&dentry->d_lru);
 	}
 	return dentry;
@@ -370,7 +372,7 @@ static inline void prune_one_dentry(stru
 
 	__d_drop(dentry);
 	list_del(&dentry->d_u.d_child);
-	dentry_stat.nr_dentry--;	/* For d_free, below */
+	dentry_stat_dec(dentry->d_sb, nr_dentry);	/* For d_free, below */
 	dentry_iput(dentry);
 	parent = dentry->d_parent;
 	d_free(dentry);
@@ -403,10 +405,10 @@ static void prune_dcache(int count)
 		tmp = dentry_unused.prev;
 		if (tmp == &dentry_unused)
 			break;
-		list_del_init(tmp);
 		prefetch(dentry_unused.prev);
-		dentry_stat.nr_unused--;
 		dentry = list_entry(tmp, struct dentry, d_lru);
+		dentry_stat_dec(dentry->d_sb, nr_unused);
+		list_del_init(&dentry->d_lru);
 
 		spin_lock(&dentry->d_lock);
 		/*
@@ -422,7 +424,7 @@ static void prune_dcache(int count)
 		if (dentry->d_flags & DCACHE_REFERENCED) {
 			dentry->d_flags &= ~DCACHE_REFERENCED;
 			list_add(&dentry->d_lru, &dentry_unused);
-			dentry_stat.nr_unused++;
+			dentry_stat_inc(dentry->d_sb, nr_unused);
 			spin_unlock(&dentry->d_lock);
 			continue;
 		}
@@ -444,7 +446,7 @@ static void select_anon(struct super_blo
 	spin_lock(&dcache_lock);
 	hlist_for_each_entry(dentry, lp, &sb->s_anon, d_hash) {
 		if (!list_empty(&dentry->d_lru)) {
-			dentry_stat.nr_unused--;
+			dentry_stat_dec(sb, nr_unused);
 			list_del_init(&dentry->d_lru);
 		}
 
@@ -455,7 +457,7 @@ static void select_anon(struct super_blo
 		spin_lock(&dentry->d_lock);
 		if (!atomic_read(&dentry->d_count)) {
 			list_add(&dentry->d_lru, &dentry_unused);
-			dentry_stat.nr_unused++;
+			dentry_stat_inc(sb, nr_unused);
 		}
 		spin_unlock(&dentry->d_lock);
 	}
@@ -519,7 +521,7 @@ repeat:
 		dentry = list_entry(tmp, struct dentry, d_lru);
 		if (dentry->d_sb != sb)
 			continue;
-		dentry_stat.nr_unused--;
+		dentry_stat_dec(sb, nr_unused);
 		list_del_init(tmp);
 		spin_lock(&dentry->d_lock);
 		if (atomic_read(&dentry->d_count)) {
@@ -615,7 +617,7 @@ resume:
 		next = tmp->next;
 
 		if (!list_empty(&dentry->d_lru)) {
-			dentry_stat.nr_unused--;
+			dentry_stat_dec(dentry->d_sb, nr_unused);
 			list_del_init(&dentry->d_lru);
 		}
 		/*
@@ -624,7 +626,7 @@ resume:
 		 */
 		if (!atomic_read(&dentry->d_count)) {
 			list_add(&dentry->d_lru, dentry_unused.prev);
-			dentry_stat.nr_unused++;
+			dentry_stat_inc(dentry->d_sb, nr_unused);
 			found++;
 		}
 
@@ -691,7 +693,7 @@ static int shrink_dcache_memory(int nr, 
 			return -1;
 		prune_dcache(nr);
 	}
-	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
+	return (global_dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
 /**
@@ -756,7 +758,7 @@ struct dentry *d_alloc(struct dentry * p
 	spin_lock(&dcache_lock);
 	if (parent)
 		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
-	dentry_stat.nr_dentry++;
+	dentry_stat_inc(dentry->d_sb, nr_dentry);
 	spin_unlock(&dcache_lock);
 
 	return dentry;
@@ -932,6 +934,9 @@ struct dentry * d_alloc_anon(struct inod
 		tmp = NULL;
 		spin_lock(&res->d_lock);
 		res->d_sb = inode->i_sb;
+		/* Add to the sb dentry_stat here only,
+		 * the global data is updated in d_alloc */
+		res->d_sb->s_dentry_stat.nr_dentry++;
 		res->d_parent = res;
 		res->d_inode = inode;
 		res->d_flags |= DCACHE_DISCONNECTED;
@@ -1613,23 +1618,23 @@ resume:
 			goto repeat;
 		}
 		if (!list_empty(&dentry->d_lru)) {
-			dentry_stat.nr_unused--;
+			dentry_stat_dec(dentry->d_sb, nr_unused);
 			list_del_init(&dentry->d_lru);
 		}
 		if (atomic_dec_and_test(&dentry->d_count)) {
 			list_add(&dentry->d_lru, dentry_unused.prev);
-			dentry_stat.nr_unused++;
+			dentry_stat_inc(dentry->d_sb, nr_unused);
 		}
 	}
 	if (this_parent != root) {
 		next = this_parent->d_u.d_child.next;
 		if (!list_empty(&this_parent->d_lru)) {
-			dentry_stat.nr_unused--;
+			dentry_stat_dec(this_parent->d_sb, nr_unused);
 			list_del_init(&this_parent->d_lru);
 		}
 		if (atomic_dec_and_test(&this_parent->d_count)) {
 			list_add(&this_parent->d_lru, dentry_unused.prev);
-			dentry_stat.nr_unused++;
+			dentry_stat_inc(this_parent->d_sb, nr_unused);
 		}
 		this_parent = this_parent->d_parent;
 		goto resume;
Index: work-2.6/fs/super.c
===================================================================
--- work-2.6.orig/fs/super.c
+++ work-2.6/fs/super.c
@@ -71,6 +71,7 @@ static struct super_block *alloc_super(v
 		INIT_LIST_HEAD(&s->s_instances);
 		INIT_HLIST_HEAD(&s->s_anon);
 		INIT_LIST_HEAD(&s->s_inodes);
+		s->s_dentry_stat.age_limit = 45;
 		init_rwsem(&s->s_umount);
 		mutex_init(&s->s_lock);
 		down_write(&s->s_umount);
Index: work-2.6/include/linux/dcache.h
===================================================================
--- work-2.6.orig/include/linux/dcache.h
+++ work-2.6/include/linux/dcache.h
@@ -36,14 +36,27 @@ struct qstr {
 	const unsigned char *name;
 };
 
-struct dentry_stat_t {
+struct dentry_stat {
 	int nr_dentry;
 	int nr_unused;
-	int age_limit;          /* age in seconds */
-	int want_pages;         /* pages requested by system */
+	int age_limit;		/* age in seconds */
 	int dummy[2];
 };
-extern struct dentry_stat_t dentry_stat;
+extern struct dentry_stat global_dentry_stat;
+
+#define dentry_stat_inc(sb, x)		\
+do {					\
+	global_dentry_stat.x++;		\
+	if (likely(sb))			\
+		(sb)->s_dentry_stat.x++;\
+} while(0)
+
+#define dentry_stat_dec(sb, x)		\
+do {					\
+	global_dentry_stat.x--;		\
+	if (likely(sb))			\
+		(sb)->s_dentry_stat.x--;\
+} while(0)
 
 /* Name hashing routines. Initial hash value */
 /* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
Index: work-2.6/include/linux/fs.h
===================================================================
--- work-2.6.orig/include/linux/fs.h
+++ work-2.6/include/linux/fs.h
@@ -847,6 +847,7 @@ struct super_block {
 	struct list_head	s_io;		/* parked for writeback */
 	struct hlist_head	s_anon;		/* anonymous dentries for (nfs) exporting */
 	struct list_head	s_files;
+	struct dentry_stat	s_dentry_stat;
 
 	struct block_device	*s_bdev;
 	struct list_head	s_instances;
Index: work-2.6/kernel/sysctl.c
===================================================================
--- work-2.6.orig/kernel/sysctl.c
+++ work-2.6/kernel/sysctl.c
@@ -958,7 +958,7 @@ static ctl_table fs_table[] = {
 	{
 		.ctl_name	= FS_DENTRY,
 		.procname	= "dentry-state",
-		.data		= &dentry_stat,
+		.data		= &global_dentry_stat,
 		.maxlen		= 6*sizeof(int),
 		.mode		= 0444,
 		.proc_handler	= &proc_dointvec,

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH 5/5] vfs: per superblock dentry unused list
  2006-06-16 10:43 [PATCH 0/5] vfs: per-superblock unused dentries list (3rd version) jblunck
                   ` (3 preceding siblings ...)
  2006-06-16 10:43 ` [PATCH 4/5] vfs: per superblock dentry stats jblunck
@ 2006-06-16 10:43 ` jblunck
  4 siblings, 0 replies; 10+ messages in thread
From: jblunck @ 2006-06-16 10:43 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-fsdevel, akpm, viro, dgc, balbir, neilb

[-- Attachment #1: patches.jbl/vfs-per-sb-dentry_unused.diff --]
[-- Type: text/plain, Size: 10338 bytes --]

This patch adds per-superblock dentry unused lists. This should speed up
unmounting/remounting of filesystems when there are a lot of dentries on the
unused lists.
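
To make the fairness rule in the reworked prune_dcache() concrete (the numbers
are made up for illustration): each superblock is asked to prune roughly its
share of the globally unused dentries, and s_scan_count boosts superblocks that
were skipped in earlier runs. With count = 128 (the usual request from
shrink_slab()), 100000 unused dentries globally and 500 of them on a small
superblock:

	tmp = sb_unused / ((global_unused / ((s_scan_count + 1) * count)) + 1)

	s_scan_count == 0:  500 / ((100000 / 128) + 1) = 500 / 782 = 0  -> nothing pruned, s_scan_count++
	s_scan_count == 3:  500 / ((100000 / 512) + 1) = 500 / 196 = 2  -> two entries pruned

A superblock that keeps being skipped therefore gets a non-zero share
eventually; once it has no unused dentries left, s_scan_count is reset to zero.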

Signed-off-by: Jan Blunck <jblunck@suse.de>
---
 fs/dcache.c        |  170 +++++++++++++++++++++++++----------------------------
 fs/super.c         |    2 
 include/linux/fs.h |    2 
 3 files changed, 86 insertions(+), 88 deletions(-)

Index: work-2.6/fs/dcache.c
===================================================================
--- work-2.6.orig/fs/dcache.c
+++ work-2.6/fs/dcache.c
@@ -61,7 +61,6 @@ static kmem_cache_t *dentry_cache __read
 static unsigned int d_hash_mask __read_mostly;
 static unsigned int d_hash_shift __read_mostly;
 static struct hlist_head *dentry_hashtable __read_mostly;
-static LIST_HEAD(dentry_unused);
 
 /* Statistics gathering. */
 struct dentry_stat global_dentry_stat = {
@@ -167,12 +166,15 @@ repeat:
 		if (dentry->d_op->d_delete(dentry))
 			goto unhash_it;
 	}
+	/* Kill dentry without superblock */
+	if (unlikely(!dentry->d_sb))
+		goto unhash_it;
 	/* Unreachable? Get rid of it */
 	if (d_unhashed(dentry))
 		goto kill_it;
 	if (list_empty(&dentry->d_lru)) {
 		dentry->d_flags |= DCACHE_REFERENCED;
-		list_add(&dentry->d_lru, &dentry_unused);
+		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_unused);
 		dentry_stat_inc(dentry->d_sb, nr_unused);
 	}
 	spin_unlock(&dentry->d_lock);
@@ -382,18 +384,20 @@ static inline void prune_one_dentry(stru
 }
 
 /**
- * prune_dcache - shrink the dcache
+ * prune_dcache_sb - prune the dcache for a superblock
+ * @sb: superblock
  * @count: number of entries to try and free
  *
- * Shrink the dcache. This is done when we need
- * more memory. When we need to unmount something
- * we call shrink_dcache_sb().
- *
- * This function may fail to free any resources if
- * all the dentries are in use.
+ * Prune the dcache for the specified super block. This walks the dentry LRU
+ * list backwards to free the @count oldest entries on it. If it finds entries
+ * which were recently referenced (the DCACHE_REFERENCED d_flag is set), they
+ * are moved to the beginning of the dentry LRU list instead.
+ *
+ * You need to have a reference to the super block and should have
+ * sb->s_umount locked. This function may fail to free any resources if all
+ * the dentries are in use.
  */
-
-static void prune_dcache(int count)
+static void prune_dcache_sb(struct super_block *sb, int count)
 {
 	spin_lock(&dcache_lock);
 	for (; count ; count--) {
@@ -402,10 +406,10 @@ static void prune_dcache(int count)
 
 		cond_resched_lock(&dcache_lock);
 
-		tmp = dentry_unused.prev;
-		if (tmp == &dentry_unused)
+		tmp = sb->s_dentry_unused.prev;
+		if (tmp == &sb->s_dentry_unused)
 			break;
-		prefetch(dentry_unused.prev);
+		prefetch(sb->s_dentry_unused.prev);
 		dentry = list_entry(tmp, struct dentry, d_lru);
 		dentry_stat_dec(dentry->d_sb, nr_unused);
 		list_del_init(&dentry->d_lru);
@@ -413,7 +417,7 @@ static void prune_dcache(int count)
 		spin_lock(&dentry->d_lock);
 		/*
 		 * We found an inuse dentry which was not removed from
-		 * dentry_unused because of laziness during lookup.  Do not free
+		 * dentry_unused because of laziness during lookup. Do not free
 		 * it - just keep it off the dentry_unused list.
 		 */
 		if (atomic_read(&dentry->d_count)) {
@@ -423,7 +427,7 @@ static void prune_dcache(int count)
 		/* If the dentry was recently referenced, don't free it. */
 		if (dentry->d_flags & DCACHE_REFERENCED) {
 			dentry->d_flags &= ~DCACHE_REFERENCED;
-			list_add(&dentry->d_lru, &dentry_unused);
+			list_add(&dentry->d_lru, &sb->s_dentry_unused);
 			dentry_stat_inc(dentry->d_sb, nr_unused);
 			spin_unlock(&dentry->d_lock);
 			continue;
@@ -433,64 +437,68 @@ static void prune_dcache(int count)
 	spin_unlock(&dcache_lock);
 }
 
-
-/*
- * parsing d_hash list does not hlist_for_each_entry_rcu() as it
- * done under dcache_lock.
+/**
+ * prune_dcache - shrink the dcache
+ * @count: number of entries to try and free
+ *
+ * Prune the dcache. This is done when we need more memory. We are walking the
+ * list of superblocks and try to shrink their dentry LRU lists.
+ *
+ * This function may fail to free any resources if all the dentries are in use.
  */
-static void select_anon(struct super_block *sb)
+static void prune_dcache(int count)
 {
-	struct dentry *dentry;
-	struct hlist_node *lp;
+	struct super_block *sb;
+	int unused = global_dentry_stat.nr_unused;
 
-	spin_lock(&dcache_lock);
-	hlist_for_each_entry(dentry, lp, &sb->s_anon, d_hash) {
-		if (!list_empty(&dentry->d_lru)) {
-			dentry_stat_dec(sb, nr_unused);
-			list_del_init(&dentry->d_lru);
-		}
+	if (count <= 0)
+		return;
 
-		/*
-		 * move only zero ref count dentries to the beginning
-		 * (the most recent end) of the unused list
-		 */
-		spin_lock(&dentry->d_lock);
-		if (!atomic_read(&dentry->d_count)) {
-			list_add(&dentry->d_lru, &dentry_unused);
-			dentry_stat_inc(sb, nr_unused);
+	spin_lock(&sb_lock);
+ restart:
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		sb->s_count++;
+		spin_unlock(&sb_lock);
+		if (down_read_trylock(&sb->s_umount)) {
+			if (sb->s_root) {
+				int tmp;
+
+				/*
+				 * We try to be fair and distribute the amount
+				 * of dentries to be pruned by the easy rule:
+				 *
+				 *   sb_count/sb_unused ~ count/global_unused
+				 *
+				 * This is not enough if the superblock has
+				 * <= 128 unused dentries (this is always
+				 * called via shrink_slab() with a count of
+				 * 128), therefore we use the s_scan_count to
+				 * artificially increase the dentry unused count
+				 * if we haven't pruned any dentries lately (in
+				 * the last runs of prune_dcache()).
+				 */
+				tmp = sb->s_dentry_stat.nr_unused /
+					((unused /
+					  ((atomic_read(&sb->s_scan_count)+1) *
+					   count))+1);
+				if (tmp) {
+					atomic_add_unless(
+						&sb->s_scan_count, -1, 0);
+					prune_dcache_sb(sb, tmp);
+				} else
+					atomic_inc(&sb->s_scan_count);
+				if (!sb->s_dentry_stat.nr_unused)
+					atomic_set(&sb->s_scan_count, 0);
+			}
+			up_read(&sb->s_umount);
 		}
-		spin_unlock(&dentry->d_lock);
-	}
-	spin_unlock(&dcache_lock);
-}
-
-static void select_sb(struct super_block *sb)
-{
-	struct dentry *dentry, *pos;
-
-	spin_lock(&dcache_lock);
-	list_for_each_entry_safe(dentry, pos, &dentry_unused, d_lru) {
-		if (dentry->d_sb != sb)
-			continue;
-		list_del(&dentry->d_lru);
-		list_add(&dentry->d_lru, &dentry_unused);
+		spin_lock(&sb_lock);
+		if (__put_super_and_need_restart(sb))
+			goto restart;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&sb_lock);
 }
 
-/*
- * Shrink the dcache for the specified super block.
- * This allows us to unmount a device without disturbing
- * the dcache for the other devices.
- *
- * This implementation makes just two traversals of the
- * unused list.  On the first pass we move the selected
- * dentries to the most recent end, and on the second
- * pass we free them.  The second pass must restart after
- * each dput(), but since the target dentries are all at
- * the end, it's really just a single traversal.
- */
-
 /**
  * shrink_dcache_sb - shrink dcache for a superblock
  * @sb: superblock
@@ -499,30 +507,16 @@ static void select_sb(struct super_block
  * is used to free the dcache before unmounting a file
  * system
  */
-
 void shrink_dcache_sb(struct super_block * sb)
 {
-	struct list_head *tmp, *next;
-	struct dentry *dentry;
-
-	/*
-	 * Pass one ... move the dentries for the specified
-	 * superblock to the most recent end of the unused list.
-	 */
-	select_anon(sb);
-	select_sb(sb);
+	struct dentry *dentry, *pos;
 
-	/*
-	 * Pass two ... free the dentries for this superblock.
-	 */
 	spin_lock(&dcache_lock);
 repeat:
-	list_for_each_safe(tmp, next, &dentry_unused) {
-		dentry = list_entry(tmp, struct dentry, d_lru);
-		if (dentry->d_sb != sb)
-			continue;
+	list_for_each_entry_safe(dentry, pos, &sb->s_dentry_unused, d_lru) {
+		BUG_ON(dentry->d_sb != sb);
 		dentry_stat_dec(sb, nr_unused);
-		list_del_init(tmp);
+		list_del_init(&dentry->d_lru);
 		spin_lock(&dentry->d_lock);
 		if (atomic_read(&dentry->d_count)) {
 			spin_unlock(&dentry->d_lock);
@@ -625,7 +619,7 @@ resume:
 		 * of the unused list for prune_dcache
 		 */
 		if (!atomic_read(&dentry->d_count)) {
-			list_add(&dentry->d_lru, dentry_unused.prev);
+			list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_unused);
 			dentry_stat_inc(dentry->d_sb, nr_unused);
 			found++;
 		}
@@ -671,7 +665,7 @@ void shrink_dcache_parent(struct dentry 
 	int found;
 
 	while ((found = select_parent(parent)) != 0)
-		prune_dcache(found);
+		prune_dcache_sb(parent->d_sb, found);
 }
 
 /*
@@ -1622,7 +1616,7 @@ resume:
 			list_del_init(&dentry->d_lru);
 		}
 		if (atomic_dec_and_test(&dentry->d_count)) {
-			list_add(&dentry->d_lru, dentry_unused.prev);
+			list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_unused);
 			dentry_stat_inc(dentry->d_sb, nr_unused);
 		}
 	}
@@ -1633,7 +1627,7 @@ resume:
 			list_del_init(&this_parent->d_lru);
 		}
 		if (atomic_dec_and_test(&this_parent->d_count)) {
-			list_add(&this_parent->d_lru, dentry_unused.prev);
+			list_add(&this_parent->d_lru, &this_parent->d_sb->s_dentry_unused);
 			dentry_stat_inc(this_parent->d_sb, nr_unused);
 		}
 		this_parent = this_parent->d_parent;
Index: work-2.6/fs/super.c
===================================================================
--- work-2.6.orig/fs/super.c
+++ work-2.6/fs/super.c
@@ -71,7 +71,9 @@ static struct super_block *alloc_super(v
 		INIT_LIST_HEAD(&s->s_instances);
 		INIT_HLIST_HEAD(&s->s_anon);
 		INIT_LIST_HEAD(&s->s_inodes);
+		INIT_LIST_HEAD(&s->s_dentry_unused);
 		s->s_dentry_stat.age_limit = 45;
+		atomic_set(&s->s_scan_count, 0);
 		init_rwsem(&s->s_umount);
 		mutex_init(&s->s_lock);
 		down_write(&s->s_umount);
Index: work-2.6/include/linux/fs.h
===================================================================
--- work-2.6.orig/include/linux/fs.h
+++ work-2.6/include/linux/fs.h
@@ -847,7 +847,9 @@ struct super_block {
 	struct list_head	s_io;		/* parked for writeback */
 	struct hlist_head	s_anon;		/* anonymous dentries for (nfs) exporting */
 	struct list_head	s_files;
+	struct list_head	s_dentry_unused;
 	struct dentry_stat	s_dentry_stat;
+	atomic_t		s_scan_count;
 
 	struct block_device	*s_bdev;
 	struct list_head	s_instances;

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 2/5] vfs: d_genocide() doesnt add dentries to unused list
  2006-06-16 10:43 ` [PATCH 2/5] vfs: d_genocide() doesnt add dentries to unused list jblunck
@ 2006-06-18 19:34   ` Balbir Singh
  2006-06-19  9:22     ` Jan Blunck
  0 siblings, 1 reply; 10+ messages in thread
From: Balbir Singh @ 2006-06-18 19:34 UTC (permalink / raw)
  To: jblunck; +Cc: linux-kernel, linux-fsdevel, akpm, viro, dgc, neilb

jblunck@suse.de wrote:
> Calling d_genocide() only lowers the reference count of the dentries but
> doesn't add them to the unused list.
> 
> Signed-off-by: Jan Blunck <jblunck@suse.de>
> ---
>  fs/dcache.c |   18 ++++++++++++++++--
>  1 file changed, 16 insertions(+), 2 deletions(-)
> 
> Index: work-2.6/fs/dcache.c
> ===================================================================
> --- work-2.6.orig/fs/dcache.c
> +++ work-2.6/fs/dcache.c
> @@ -1612,11 +1612,25 @@ resume:
>  			this_parent = dentry;
>  			goto repeat;
>  		}
> -		atomic_dec(&dentry->d_count);
> +		if (!list_empty(&dentry->d_lru)) {
> +			dentry_stat.nr_unused--;
> +			list_del_init(&dentry->d_lru);
> +		}
> +		if (atomic_dec_and_test(&dentry->d_count)) {
> +			list_add(&dentry->d_lru, dentry_unused.prev);
> +			dentry_stat.nr_unused++;
> +		}

We could have dentries on the LRU list with non-zero d_count. If
we have a dentry on the LRU list with a count of 1, then the code
will remove it from the LRU list and then add it back subsequently.

I think the condition below should be an else if

>  	}
>  	if (this_parent != root) {
>  		next = this_parent->d_u.d_child.next;
> -		atomic_dec(&this_parent->d_count);
> +		if (!list_empty(&this_parent->d_lru)) {
> +			dentry_stat.nr_unused--;
> +			list_del_init(&this_parent->d_lru);
> +		}
> +		if (atomic_dec_and_test(&this_parent->d_count)) {
> +			list_add(&this_parent->d_lru, dentry_unused.prev);
> +			dentry_stat.nr_unused++;
> +		}

Ditto

>  		this_parent = this_parent->d_parent;
>  		goto resume;
>  	}
> 

d_genocide() now almost looks like select_parent(). I think we can share a lot
of code between the two.

-- 

	Balbir Singh,
	Linux Technology Center,
	IBM Software Labs

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 2/5] vfs: d_genocide() doesnt add dentries to unused list
  2006-06-18 19:34   ` Balbir Singh
@ 2006-06-19  9:22     ` Jan Blunck
  2006-06-19 10:38       ` Balbir Singh
  0 siblings, 1 reply; 10+ messages in thread
From: Jan Blunck @ 2006-06-19  9:22 UTC (permalink / raw)
  To: Balbir Singh; +Cc: linux-kernel, linux-fsdevel, akpm, viro, dgc, neilb

On Mon, Jun 19, Balbir Singh wrote:

> > 			this_parent = dentry;
> > 			goto repeat;
> > 		}
> >-		atomic_dec(&dentry->d_count);
> >+		if (!list_empty(&dentry->d_lru)) {
> >+			dentry_stat.nr_unused--;
> >+			list_del_init(&dentry->d_lru);
> >+		}
> >+		if (atomic_dec_and_test(&dentry->d_count)) {
> >+			list_add(&dentry->d_lru, dentry_unused.prev);
> >+			dentry_stat.nr_unused++;
> >+		}
> 
> We could have dentries on the LRU list with non-zero d_count. If
> we have a dentry on the LRU list with a count of 1, then the code
> will remove it from LRU list and then add it back subsequently.
> 

So you think this is better?

   if (atomic_dec_and_test(&dentry->d_count)) {
      if (!list_empty(&dentry->d_lru))
         list_move_tail(&dentry->d_lru, &dentry_unused);
   } else
      if (!list_empty(&dentry->d_lru)) {
         dentry_stat.nr_unused--;
         list_del_init(&dentry->d_lru);
      }


> I think the condition below should be an else if
> 

No. We always lower the reference count in d_genocide.

> 
> d_genocide() now almost looks like select_parent(). I think we can share a 
> lot
> of code between the two.
> 

Hmm, interesting idea. This would save the dentry-tree walking code in
have_submounts too. Maybe something like this:

+static int select_parent_walker(struct dentry * dentry, int * found)
+{
+       if (!list_empty(&dentry->d_lru)) {
+               dentry_stat.nr_unused--;
+               list_del_init(&dentry->d_lru);
+       }
+
+       /*
+        * move only zero ref count dentries to the end
+        * of the unused list for prune_dcache
+        */
+       if (!atomic_read(&dentry->d_count)) {
+               list_add(&dentry->d_lru, dentry_unused.prev);
+               dentry_stat.nr_unused++;
+               (*found)++;
+       }
+
+       /*
+        * We can return to the caller if we have found some (this
+        * ensures forward progress). We'll be coming back to find
+        * the rest.
+        */
+       if (*found && need_resched())
+               return -1;
+
+       return 0;
+}
+
+typedef int (*walker_t)(struct dentry * dentry, int * found);
+
+static int dentry_tree_walk(struct dentry * parent, walker_t walker)
+{
+       struct dentry *this_parent = parent;
+       struct list_head *next;
+       int ret = 0;
+
+       spin_lock(&dcache_lock);
+repeat:
+       next = this_parent->d_subdirs.next;
+resume:
+       while (next != &this_parent->d_subdirs) {
+               struct list_head *tmp = next;
+               struct dentry *dentry = list_entry(tmp, struct dentry,
+                                                  d_u.d_child);
+               next = tmp->next;
+
+               if (walker(dentry, &ret))
+                       goto out;
+
+               /*
+                * Descend a level if the d_subdirs list is non-empty.
+                */
+               if (!list_empty(&dentry->d_subdirs)) {
+                       this_parent = dentry;
+                       goto repeat;
+               }
+       }
+       /*
+        * All done at this level ... ascend and resume the search.
+        */
+       if (this_parent != parent) {
+               next = this_parent->d_u.d_child.next;
+               this_parent = this_parent->d_parent;
+               goto resume;
+       }
+out:
+       spin_unlock(&dcache_lock);
+       return ret;
+}

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 2/5] vfs: d_genocide() doesnt add dentries to unused list
  2006-06-19  9:22     ` Jan Blunck
@ 2006-06-19 10:38       ` Balbir Singh
  0 siblings, 0 replies; 10+ messages in thread
From: Balbir Singh @ 2006-06-19 10:38 UTC (permalink / raw)
  To: Jan Blunck; +Cc: linux-kernel, linux-fsdevel, akpm, viro, dgc, neilb

Jan Blunck wrote:
> On Mon, Jun 19, Balbir Singh wrote:
> 
> 
>>>			this_parent = dentry;
>>>			goto repeat;
>>>		}
>>>-		atomic_dec(&dentry->d_count);
>>>+		if (!list_empty(&dentry->d_lru)) {
>>>+			dentry_stat.nr_unused--;
>>>+			list_del_init(&dentry->d_lru);
>>>+		}
>>>+		if (atomic_dec_and_test(&dentry->d_count)) {
>>>+			list_add(&dentry->d_lru, dentry_unused.prev);
>>>+			dentry_stat.nr_unused++;
>>>+		}
>>
>>We could have dentries on the LRU list with non-zero d_count. If
>>we have a dentry on the LRU list with a count of 1, then the code
>>will remove it from LRU list and then add it back subsequently.
>>
> 
> 
> So you think this is better?
> 
>    if (atomic_dec_and_test(&dentry->d_count)) {
>       if (!list_empty(&dentry->d_lru))
>          list_move_tail(&dentry->d_lru, &dentry_unused);
>    } else
>       if (!list_empty(&dentry->d_lru)) {
>          dentry_stat.nr_unused--;
>          list_del_init(&dentry->d_lru);
>       }
> 
> 

Yes, I think it is.

> 
>>I think the condition below should be an else if
>>
> 
> 
> No. We always lower the reference count in d_genocide.
> 

Yep, good catch

> 
>>d_genocide() now almost looks like select_parent(). I think we can share a 
>>lot
>>of code between the two.
>>
> 
> 
> Hmm, interesting idea. This would save the dentry-tree walking code in
> have_submounts too. Maybe something like this:
> 
> +static int select_parent_walker(struct dentry * dentry, int * found)
> +{
> +       if (!list_empty(&dentry->d_lru)) {
> +               dentry_stat.nr_unused--;
> +               list_del_init(&dentry->d_lru);
> +       }
> +
> +       /*
> +        * move only zero ref count dentries to the end
> +        * of the unused list for prune_dcache
> +        */
> +       if (!atomic_read(&dentry->d_count)) {
> +               list_add(&dentry->d_lru, dentry_unused.prev);
> +               dentry_stat.nr_unused++;
> +               (*found)++;
> +       }
> +
> +       /*
> +        * We can return to the caller if we have found some (this
> +        * ensures forward progress). We'll be coming back to find
> +        * the rest.
> +        */
> +       if (*found && need_resched())
> +               return -1;

Is this true for all paths? d_genocide() might actually not return

> +
> +       return 0;
> +}
> +
> +typedef int (*walker_t)(struct dentry * dentry, int * found);
> +

Will there be a different type of walker as well? Is it going to be too different?

> +static int dentry_tree_walk(struct dentry * parent, walker_t walker)
> +{
> +       struct dentry *this_parent = parent;
> +       struct list_head *next;
> +       int ret = 0;
> +
> +       spin_lock(&dcache_lock);
> +repeat:
> +       next = this_parent->d_subdirs.next;
> +resume:
> +       while (next != &this_parent->d_subdirs) {
> +               struct list_head *tmp = next;
> +               struct dentry *dentry = list_entry(tmp, struct dentry,
> +                                                  d_u.d_child);
> +               next = tmp->next;
> +
> +               if (walker(dentry, &ret))
> +                       goto out;
> +
> +               /*
> +                * Descend a level if the d_subdirs list is non-empty.
> +                */
> +               if (!list_empty(&dentry->d_subdirs)) {
> +                       this_parent = dentry;
> +                       goto repeat;
> +               }
> +       }
> +       /*
> +        * All done at this level ... ascend and resume the search.
> +        */
> +       if (this_parent != parent) {
> +               next = this_parent->d_u.d_child.next;
> +               this_parent = this_parent->d_parent;
> +               goto resume;
> +       }
> +out:
> +       spin_unlock(&dcache_lock);
> +       return ret;
> +}

The overall code looks good.

-- 

	Balbir Singh,
	Linux Technology Center,
	IBM Software Labs

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2006-06-19 10:45 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2006-06-16 10:43 [PATCH 0/5] vfs: per-superblock unused dentries list (3rd version) jblunck
2006-06-16 10:43 ` [PATCH 1/5] vfs: remove whitespace noise from fs/dcache.c jblunck
2006-06-16 10:43 ` [PATCH 2/5] vfs: d_genocide() doesnt add dentries to unused list jblunck
2006-06-18 19:34   ` Balbir Singh
2006-06-19  9:22     ` Jan Blunck
2006-06-19 10:38       ` Balbir Singh
2006-06-16 10:43 ` [PATCH 3/5] vfs: remove shrink_dcache_anon() jblunck
2006-06-16 10:43 ` [PATCH 4/5] vfs: per superblock dentry stats jblunck
2006-06-16 10:43 ` [PATCH 5/5] vfs: per superblock dentry unused list jblunck
  -- strict thread matches above, loose matches on Subject: below --
2006-06-01  9:51 [patch 0/5] [PATCH,RFC] vfs: per-superblock unused dentries list (2nd version) jblunck
2006-06-01  9:51 ` [patch 2/5] vfs: d_genocide() doesnt add dentries to unused list jblunck
