* [PATCH, RFC] lockd: stop abusing file_lock_list
@ 2006-02-14 19:20 Christoph Hellwig
From: Christoph Hellwig @ 2006-02-14 19:20 UTC (permalink / raw)
  To: linux-fsdevel

Currently lockd directly accesses the file_lock_list from fs/locks.c
in order to mark locks as granted or reclaimable.  This is suboptimal
because a) lockd needs to poke into locks.c internals, and b) it has
to iterate over every lock in the system just to mark the locks that
belong to a single host.

This patch adds per-host lists for granted and reclaimable locks to
the nlm_host structure instead, and keeps locks on those lists; a
short sketch of the resulting list handling follows the summary below.

nlmclnt_lock:
	now adds the lock to h_granted instead of setting the
	NFS_LCK_GRANTED flag, still O(1)

nlmclnt_mark_reclaim:
	goes away completely, replaced by a list_splice_init.
	Complexity reduced from O(locks in the system) to O(1)

reclaimer:
	iterates over h_reclaim now, complexity reduced from
	O(locks in the system) to O(locks per nlm_host)
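
For reference, a minimal sketch of the list idiom the patch relies on
(this assumes the patch below is applied so that h_granted, h_reclaim
and fl_u.nfs_fl.list exist; the demo_* wrappers are purely
illustrative and not part of the patch, and locking/error handling is
omitted):

#include <linux/list.h>
#include <linux/fs.h>
#include <linux/lockd/lockd.h>

/* a granted lock is linked onto the per-host list in O(1) */
static void demo_grant(struct nlm_host *host, struct file_lock *fl)
{
	list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
}

/* on SM_NOTIFY the whole granted list moves to h_reclaim in O(1) */
static void demo_mark_reclaim(struct nlm_host *host)
{
	list_splice_init(&host->h_granted, &host->h_reclaim);
}

/* the reclaimer walks only this host's locks, not every lock
 * in the system */
static void demo_reclaim(struct nlm_host *host)
{
	struct file_lock *fl, *next;

	list_for_each_entry_safe(fl, next, &host->h_reclaim,
				 fl_u.nfs_fl.list) {
		list_del(&fl->fl_u.nfs_fl.list);
		nlmclnt_reclaim(host, fl);
	}
}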


Index: linux-2.6/fs/lockd/clntlock.c
===================================================================
--- linux-2.6.orig/fs/lockd/clntlock.c	2006-02-08 21:04:46.000000000 +0100
+++ linux-2.6/fs/lockd/clntlock.c	2006-02-08 21:23:05.000000000 +0100
@@ -140,34 +140,6 @@
  */
 
 /*
- * Mark the locks for reclaiming.
- * FIXME: In 2.5 we don't want to iterate through any global file_lock_list.
- *        Maintain NLM lock reclaiming lists in the nlm_host instead.
- */
-static
-void nlmclnt_mark_reclaim(struct nlm_host *host)
-{
-	struct file_lock *fl;
-	struct inode *inode;
-	struct list_head *tmp;
-
-	list_for_each(tmp, &file_lock_list) {
-		fl = list_entry(tmp, struct file_lock, fl_link);
-
-		inode = fl->fl_file->f_dentry->d_inode;
-		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
-			continue;
-		if (fl->fl_u.nfs_fl.owner == NULL)
-			continue;
-		if (fl->fl_u.nfs_fl.owner->host != host)
-			continue;
-		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED))
-			continue;
-		fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM;
-	}
-}
-
-/*
  * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number,
  * that we mark locks for reclaiming, and that we bump the pseudo NSM state.
  */
@@ -179,7 +151,12 @@
 	host->h_state++;
 	host->h_nextrebind = 0;
 	nlm_rebind_host(host);
-	nlmclnt_mark_reclaim(host);
+
+	/*
+	 * Mark the locks for reclaiming.
+	 */
+	list_splice_init(&host->h_granted, &host->h_reclaim);
+
 	dprintk("NLM: reclaiming locks for host %s", host->h_name);
 }
 
@@ -208,9 +185,7 @@
 {
 	struct nlm_host	  *host = (struct nlm_host *) ptr;
 	struct nlm_wait	  *block;
-	struct list_head *tmp;
-	struct file_lock *fl;
-	struct inode *inode;
+	struct file_lock *fl, *next;
 
 	daemonize("%s-reclaim", host->h_name);
 	allow_signal(SIGKILL);
@@ -222,20 +197,9 @@
 
 	/* First, reclaim all locks that have been marked. */
 restart:
-	list_for_each(tmp, &file_lock_list) {
-		fl = list_entry(tmp, struct file_lock, fl_link);
-
-		inode = fl->fl_file->f_dentry->d_inode;
-		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
-			continue;
-		if (fl->fl_u.nfs_fl.owner == NULL)
-			continue;
-		if (fl->fl_u.nfs_fl.owner->host != host)
-			continue;
-		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM))
-			continue;
+	list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
+		list_del(&fl->fl_u.nfs_fl.list);
 
-		fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM;
 		nlmclnt_reclaim(host, fl);
 		if (signalled())
 			break;
Index: linux-2.6/fs/lockd/clntproc.c
===================================================================
--- linux-2.6.orig/fs/lockd/clntproc.c	2006-02-08 21:04:46.000000000 +0100
+++ linux-2.6/fs/lockd/clntproc.c	2006-02-08 21:22:17.000000000 +0100
@@ -504,7 +504,6 @@
 {
 	BUG_ON(fl->fl_ops != NULL);
 	fl->fl_u.nfs_fl.state = 0;
-	fl->fl_u.nfs_fl.flags = 0;
 	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
 	fl->fl_ops = &nlmclnt_lock_ops;
 }
@@ -591,8 +590,8 @@
 
 	if (resp->status == NLM_LCK_GRANTED) {
 		fl->fl_u.nfs_fl.state = host->h_state;
-		fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED;
 		fl->fl_flags |= FL_SLEEP;
+		list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
 		do_vfs_lock(fl);
 	}
 	status = nlm_stat_to_errno(resp->status);
@@ -658,9 +657,11 @@
 	struct nlm_res	*resp = &req->a_res;
 	int		status;
 
-	/* Clean the GRANTED flag now so the lock doesn't get
-	 * reclaimed while we're stuck in the unlock call. */
-	fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;
+	/*
+	 * Remove from the granted list now so the lock doesn't get
+	 * reclaimed while we're stuck in the unlock call.
+	 */
+	list_del(&fl->fl_u.nfs_fl.list);
 
 	if (req->a_flags & RPC_TASK_ASYNC) {
 		status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
Index: linux-2.6/fs/lockd/host.c
===================================================================
--- linux-2.6.orig/fs/lockd/host.c	2006-01-10 13:46:05.000000000 +0100
+++ linux-2.6/fs/lockd/host.c	2006-02-08 21:12:25.000000000 +0100
@@ -123,6 +123,8 @@
 	nlm_hosts[hash]    = host;
 	INIT_LIST_HEAD(&host->h_lockowners);
 	spin_lock_init(&host->h_lock);
+	INIT_LIST_HEAD(&host->h_granted);
+	INIT_LIST_HEAD(&host->h_reclaim);
 
 	if (++nrhosts > NLM_HOST_MAX)
 		next_gc = 0;
Index: linux-2.6/include/linux/lockd/lockd.h
===================================================================
--- linux-2.6.orig/include/linux/lockd/lockd.h	2006-02-04 13:35:01.000000000 +0100
+++ linux-2.6/include/linux/lockd/lockd.h	2006-02-08 21:11:47.000000000 +0100
@@ -58,6 +58,8 @@
 	unsigned long		h_expires;	/* eligible for GC */
 	struct list_head	h_lockowners;	/* Lockowners for the client */
 	spinlock_t		h_lock;
+	struct list_head	h_granted;	/* Locks in GRANTED state */
+	struct list_head	h_reclaim;	/* Locks in RECLAIM state */
 };
 
 /*
Index: linux-2.6/include/linux/nfs_fs_i.h
===================================================================
--- linux-2.6.orig/include/linux/nfs_fs_i.h	2005-12-27 18:30:35.000000000 +0100
+++ linux-2.6/include/linux/nfs_fs_i.h	2006-02-08 21:18:38.000000000 +0100
@@ -12,8 +12,8 @@
  */
 struct nfs_lock_info {
 	u32		state;
-	u32		flags;
 	struct nlm_lockowner *owner;
+	struct list_head list;
 };
 
 struct nfs4_lock_state;
@@ -21,10 +21,4 @@
 	struct nfs4_lock_state *owner;
 };
 
-/*
- * Lock flag values
- */
-#define NFS_LCK_GRANTED		0x0001		/* lock has been granted */
-#define NFS_LCK_RECLAIM		0x0002		/* lock marked for reclaiming */
-
 #endif
Index: linux-2.6/fs/locks.c
===================================================================
--- linux-2.6.orig/fs/locks.c	2006-01-10 13:46:05.000000000 +0100
+++ linux-2.6/fs/locks.c	2006-02-08 21:24:27.000000000 +0100
@@ -139,10 +139,7 @@
 #define for_each_lock(inode, lockp) \
 	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
 
-LIST_HEAD(file_lock_list);
-
-EXPORT_SYMBOL(file_lock_list);
-
+static LIST_HEAD(file_lock_list);
 static LIST_HEAD(blocked_list);
 
 static kmem_cache_t *filelock_cache;
Index: linux-2.6/include/linux/fs.h
===================================================================
--- linux-2.6.orig/include/linux/fs.h	2006-02-08 20:49:43.000000000 +0100
+++ linux-2.6/include/linux/fs.h	2006-02-08 21:24:03.000000000 +0100
@@ -730,8 +730,6 @@
 #define OFFT_OFFSET_MAX	INT_LIMIT(off_t)
 #endif
 
-extern struct list_head file_lock_list;
-
 #include <linux/fcntl.h>
 
 extern int fcntl_getlk(struct file *, struct flock __user *);
