public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 000 of 4] knfsd: Introduction
@ 2006-07-28  5:09 NeilBrown
  2006-07-28  5:09 ` [PATCH 001 of 4] knfsd: Drop 'serv' option to svc_recv and svc_process NeilBrown
                   ` (4 more replies)
  0 siblings, 5 replies; 7+ messages in thread
From: NeilBrown @ 2006-07-28  5:09 UTC (permalink / raw)
  To: Andrew Morton; +Cc: J. Bruce Fields, nfs, linux-kernel

Following are 4 patches for knfsd in 2.6-mm-latest.  They address some
issues found by Bruce Fields' greatly appreciated patch review.  Thanks Bruce.
They (like the patches they build on) are *not* 2.6.18 material.

NeilBrown


 [PATCH 001 of 4] knfsd: Drop 'serv' option to svc_recv and svc_process
 [PATCH 002 of 4] knfsd: Check return value of lockd_up in write_ports
 [PATCH 003 of 4] knfsd: Move makesock failed warning into make_socks.
 [PATCH 004 of 4] knfsd: Correctly handle error condition from lockd_up

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 001 of 4] knfsd: Drop 'serv' option to svc_recv and svc_process
  2006-07-28  5:09 [PATCH 000 of 4] knfsd: Introduction NeilBrown
@ 2006-07-28  5:09 ` NeilBrown
  2006-07-28  5:09 ` [PATCH 002 of 4] knfsd: Check return value of lockd_up in write_ports NeilBrown
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: NeilBrown @ 2006-07-28  5:09 UTC (permalink / raw)
  To: Andrew Morton; +Cc: J. Bruce Fields, nfs, linux-kernel


It isn't needed as it is available in rqstp->rq_server,
and dropping it allows some local vars to be dropped.

Cc: "J. Bruce Fields" <bfields@fieldses.org>

Signed-off-by: Neil Brown <neilb@suse.de>

### Diffstat output
 ./fs/lockd/svc.c                 |    7 +++----
 ./fs/nfsd/nfssvc.c               |    6 ++----
 ./include/linux/sunrpc/svc.h     |    2 +-
 ./include/linux/sunrpc/svcsock.h |    2 +-
 ./net/sunrpc/svc.c               |    3 ++-
 ./net/sunrpc/svcsock.c           |    3 ++-
 6 files changed, 11 insertions(+), 12 deletions(-)

diff .prev/fs/lockd/svc.c ./fs/lockd/svc.c
--- .prev/fs/lockd/svc.c	2006-07-28 11:34:32.000000000 +1000
+++ ./fs/lockd/svc.c	2006-07-28 11:56:20.000000000 +1000
@@ -98,7 +98,6 @@ static inline void clear_grace_period(vo
 static void
 lockd(struct svc_rqst *rqstp)
 {
-	struct svc_serv	*serv = rqstp->rq_server;
 	int		err = 0;
 	unsigned long grace_period_expire;
 
@@ -114,7 +113,7 @@ lockd(struct svc_rqst *rqstp)
 	 * Let our maker know we're running.
 	 */
 	nlmsvc_pid = current->pid;
-	nlmsvc_serv = serv;
+	nlmsvc_serv = rqstp->rq_server;
 	complete(&lockd_start_done);
 
 	daemonize("lockd");
@@ -164,7 +163,7 @@ lockd(struct svc_rqst *rqstp)
 		 * Find a socket with data available and call its
 		 * recvfrom routine.
 		 */
-		err = svc_recv(serv, rqstp, timeout);
+		err = svc_recv(rqstp, timeout);
 		if (err == -EAGAIN || err == -EINTR)
 			continue;
 		if (err < 0) {
@@ -177,7 +176,7 @@ lockd(struct svc_rqst *rqstp)
 		dprintk("lockd: request from %08x\n",
 			(unsigned)ntohl(rqstp->rq_addr.sin_addr.s_addr));
 
-		svc_process(serv, rqstp);
+		svc_process(rqstp);
 
 	}
 

diff .prev/fs/nfsd/nfssvc.c ./fs/nfsd/nfssvc.c
--- .prev/fs/nfsd/nfssvc.c	2006-07-28 11:34:33.000000000 +1000
+++ ./fs/nfsd/nfssvc.c	2006-07-28 11:55:59.000000000 +1000
@@ -323,7 +323,6 @@ update_thread_usage(int busy_threads)
 static void
 nfsd(struct svc_rqst *rqstp)
 {
-	struct svc_serv	*serv = rqstp->rq_server;
 	struct fs_struct *fsp;
 	int		err;
 	struct nfsd_list me;
@@ -373,8 +372,7 @@ nfsd(struct svc_rqst *rqstp)
 		 * Find a socket with data available and call its
 		 * recvfrom routine.
 		 */
-		while ((err = svc_recv(serv, rqstp,
-				       60*60*HZ)) == -EAGAIN)
+		while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
 			;
 		if (err < 0)
 			break;
@@ -387,7 +385,7 @@ nfsd(struct svc_rqst *rqstp)
 		/* Process request with signals blocked.  */
 		sigprocmask(SIG_SETMASK, &allowed_mask, NULL);
 
-		svc_process(serv, rqstp);
+		svc_process(rqstp);
 
 		/* Unlock export hash tables */
 		exp_readunlock();

diff .prev/include/linux/sunrpc/svc.h ./include/linux/sunrpc/svc.h
--- .prev/include/linux/sunrpc/svc.h	2006-07-28 11:34:32.000000000 +1000
+++ ./include/linux/sunrpc/svc.h	2006-07-28 11:54:16.000000000 +1000
@@ -321,7 +321,7 @@ struct svc_serv *  svc_create(struct svc
 int		   svc_create_thread(svc_thread_fn, struct svc_serv *);
 void		   svc_exit_thread(struct svc_rqst *);
 void		   svc_destroy(struct svc_serv *);
-int		   svc_process(struct svc_serv *, struct svc_rqst *);
+int		   svc_process(struct svc_rqst *);
 int		   svc_register(struct svc_serv *, int, unsigned short);
 void		   svc_wake_up(struct svc_serv *);
 void		   svc_reserve(struct svc_rqst *rqstp, int space);

diff .prev/include/linux/sunrpc/svcsock.h ./include/linux/sunrpc/svcsock.h
--- .prev/include/linux/sunrpc/svcsock.h	2006-07-28 11:34:33.000000000 +1000
+++ ./include/linux/sunrpc/svcsock.h	2006-07-28 11:54:40.000000000 +1000
@@ -57,7 +57,7 @@ struct svc_sock {
  */
 int		svc_makesock(struct svc_serv *, int, unsigned short);
 void		svc_delete_socket(struct svc_sock *);
-int		svc_recv(struct svc_serv *, struct svc_rqst *, long);
+int		svc_recv(struct svc_rqst *, long);
 int		svc_send(struct svc_rqst *);
 void		svc_drop(struct svc_rqst *);
 void		svc_sock_update_bufs(struct svc_serv *serv);

diff .prev/net/sunrpc/svc.c ./net/sunrpc/svc.c
--- .prev/net/sunrpc/svc.c	2006-07-28 11:34:32.000000000 +1000
+++ ./net/sunrpc/svc.c	2006-07-28 11:53:47.000000000 +1000
@@ -253,13 +253,14 @@ svc_register(struct svc_serv *serv, int 
  * Process the RPC request.
  */
 int
-svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
+svc_process(struct svc_rqst *rqstp)
 {
 	struct svc_program	*progp;
 	struct svc_version	*versp = NULL;	/* compiler food */
 	struct svc_procedure	*procp = NULL;
 	struct kvec *		argv = &rqstp->rq_arg.head[0];
 	struct kvec *		resv = &rqstp->rq_res.head[0];
+	struct svc_serv		*serv = rqstp->rq_server;
 	kxdrproc_t		xdr;
 	u32			*statp;
 	u32			dir, prog, vers, proc,

diff .prev/net/sunrpc/svcsock.c ./net/sunrpc/svcsock.c
--- .prev/net/sunrpc/svcsock.c	2006-07-28 11:34:33.000000000 +1000
+++ ./net/sunrpc/svcsock.c	2006-07-28 11:55:15.000000000 +1000
@@ -1176,9 +1176,10 @@ svc_sock_update_bufs(struct svc_serv *se
  * Receive the next request on any socket.
  */
 int
-svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout)
+svc_recv(struct svc_rqst *rqstp, long timeout)
 {
 	struct svc_sock		*svsk =NULL;
+	struct svc_serv		*serv = rqstp->rq_server;
 	int			len;
 	int 			pages;
 	struct xdr_buf		*arg;

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 002 of 4] knfsd: Check return value of lockd_up in write_ports
  2006-07-28  5:09 [PATCH 000 of 4] knfsd: Introduction NeilBrown
  2006-07-28  5:09 ` [PATCH 001 of 4] knfsd: Drop 'serv' option to svc_recv and svc_process NeilBrown
@ 2006-07-28  5:09 ` NeilBrown
  2006-07-28  5:09 ` [PATCH 003 of 4] knfsd: Move makesock failed warning into make_socks NeilBrown
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: NeilBrown @ 2006-07-28  5:09 UTC (permalink / raw)
  To: Andrew Morton; +Cc: J. Bruce Fields, nfs, linux-kernel


We should be checking the return value of lockd_up when
adding a new socket to nfsd.
So move the lockd_up before the svc_addsock and check
the return value.
The move is because lockd_down is easy, but there is no easy
way to remove a recently added socket.

Cc: "J. Bruce Fields" <bfields@fieldses.org>

Signed-off-by: Neil Brown <neilb@suse.de>

### Diffstat output
 ./fs/nfsd/nfsctl.c |    9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff .prev/fs/nfsd/nfsctl.c ./fs/nfsd/nfsctl.c
--- .prev/fs/nfsd/nfsctl.c	2006-07-28 11:34:33.000000000 +1000
+++ ./fs/nfsd/nfsctl.c	2006-07-28 12:07:35.000000000 +1000
@@ -454,12 +454,15 @@ static ssize_t write_ports(struct file *
 		err = nfsd_create_serv();
 		if (!err) {
 			int proto = 0;
-			err = svc_addsock(nfsd_serv, fd, buf, &proto);
+			err = lockd_up(proto);
+			if (!err) {
+				err = svc_addsock(nfsd_serv, fd, buf, &proto);
+				if (err)
+					lockd_down();
+			}
 			/* Decrease the count, but don't shutdown the
 			 * the service
 			 */
-			if (err >= 0)
-				lockd_up(proto);
 			nfsd_serv->sv_nrthreads--;
 		}
 		return err;

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 003 of 4] knfsd: Move makesock failed warning into make_socks.
  2006-07-28  5:09 [PATCH 000 of 4] knfsd: Introduction NeilBrown
  2006-07-28  5:09 ` [PATCH 001 of 4] knfsd: Drop 'serv' option to svc_recv and svc_process NeilBrown
  2006-07-28  5:09 ` [PATCH 002 of 4] knfsd: Check return value of lockd_up in write_ports NeilBrown
@ 2006-07-28  5:09 ` NeilBrown
  2006-07-28  5:10 ` [PATCH 004 of 4] knfsd: Correctly handle error condition from lockd_up NeilBrown
  2006-07-28 21:10 ` [PATCH 000 of 4] knfsd: Introduction J. Bruce Fields
  4 siblings, 0 replies; 7+ messages in thread
From: NeilBrown @ 2006-07-28  5:09 UTC (permalink / raw)
  To: Andrew Morton; +Cc: J. Bruce Fields, nfs, linux-kernel


Thus it is printed for any path that leads to failure (make_socks is
called from two places).

Cc: "J. Bruce Fields" <bfields@fieldses.org>

Signed-off-by: Neil Brown <neilb@suse.de>

### Diffstat output
 ./fs/lockd/svc.c |   18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff .prev/fs/lockd/svc.c ./fs/lockd/svc.c
--- .prev/fs/lockd/svc.c	2006-07-28 15:00:55.000000000 +1000
+++ ./fs/lockd/svc.c	2006-07-28 15:01:30.000000000 +1000
@@ -227,15 +227,19 @@ static int make_socks(struct svc_serv *s
 	 * If nlm_udpport or nlm_tcpport were set as module
 	 * options, make those sockets unconditionally
 	 */
+	static int		warned;
 	int err = 0;
 	if (proto == IPPROTO_UDP || nlm_udpport)
 		if (!find_socket(serv, IPPROTO_UDP))
 			err = svc_makesock(serv, IPPROTO_UDP, nlm_udpport);
-	if (err)
-		return err;
-	if (proto == IPPROTO_TCP || nlm_tcpport)
+	if (err == 0 && (proto == IPPROTO_TCP || nlm_tcpport))
 		if (!find_socket(serv, IPPROTO_TCP))
 			err= svc_makesock(serv, IPPROTO_TCP, nlm_tcpport);
+	if (!err)
+		warned = 0;
+	else if (warned++ == 0)
+		printk(KERN_WARNING
+		       "lockd_up: makesock failed, error=%d\n", err);
 	return err;
 }
 
@@ -245,7 +249,6 @@ static int make_socks(struct svc_serv *s
 int
 lockd_up(int proto) /* Maybe add a 'family' option when IPv6 is supported ?? */
 {
-	static int		warned;
 	struct svc_serv *	serv;
 	int			error = 0;
 
@@ -278,13 +281,8 @@ lockd_up(int proto) /* Maybe add a 'fami
 		goto out;
 	}
 
-	if ((error = make_socks(serv, proto)) < 0) {
-		if (warned++ == 0) 
-			printk(KERN_WARNING
-				"lockd_up: makesock failed, error=%d\n", error);
+	if ((error = make_socks(serv, proto)) < 0)
 		goto destroy_and_out;
-	} 
-	warned = 0;
 
 	/*
 	 * Create the kernel thread and wait for it to start.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 004 of 4] knfsd: Correctly handle error condition from lockd_up
  2006-07-28  5:09 [PATCH 000 of 4] knfsd: Introduction NeilBrown
                   ` (2 preceding siblings ...)
  2006-07-28  5:09 ` [PATCH 003 of 4] knfsd: Move makesock failed warning into make_socks NeilBrown
@ 2006-07-28  5:10 ` NeilBrown
  2006-07-28 21:10 ` [PATCH 000 of 4] knfsd: Introduction J. Bruce Fields
  4 siblings, 0 replies; 7+ messages in thread
From: NeilBrown @ 2006-07-28  5:10 UTC (permalink / raw)
  To: Andrew Morton; +Cc: J. Bruce Fields, nfs, linux-kernel


If lockd_up fails - what should we expect?  Do we have to later call lockd_down?

Well the nfs client thinks "no", the nfs server thinks "yes".
lockd thinks "yes".

The only answer that really makes sense is "no" !!

So:
  Make lockd_up only increment  nlmsvc_users on success.
  Make nfsd handle errors from lockd_up properly.
  Make sure lockd_up(0) never fails when lockd is running
    so that the 'reclaimer' call to lockd_up doesn't need to
    be error checked.

Cc: "J. Bruce Fields" <bfields@fieldses.org>

Signed-off-by: Neil Brown <neilb@suse.de>

### Diffstat output
 ./fs/lockd/clntlock.c |    2 +-
 ./fs/lockd/svc.c      |   12 +++++-------
 ./fs/nfsd/nfssvc.c    |   16 ++++++++++------
 3 files changed, 16 insertions(+), 14 deletions(-)

diff .prev/fs/lockd/clntlock.c ./fs/lockd/clntlock.c
--- .prev/fs/lockd/clntlock.c	2006-07-28 14:53:28.000000000 +1000
+++ ./fs/lockd/clntlock.c	2006-07-28 15:01:38.000000000 +1000
@@ -202,7 +202,7 @@ reclaimer(void *ptr)
 	/* This one ensures that our parent doesn't terminate while the
 	 * reclaim is in progress */
 	lock_kernel();
-	lockd_up(0);
+	lockd_up(0); /* note: this cannot fail as lockd is already running */
 
 	nlmclnt_prepare_reclaim(host);
 	/* First, reclaim all locks that have been marked. */

diff .prev/fs/lockd/svc.c ./fs/lockd/svc.c
--- .prev/fs/lockd/svc.c	2006-07-28 15:01:30.000000000 +1000
+++ ./fs/lockd/svc.c	2006-07-28 15:01:38.000000000 +1000
@@ -254,15 +254,11 @@ lockd_up(int proto) /* Maybe add a 'fami
 
 	mutex_lock(&nlmsvc_mutex);
 	/*
-	 * Unconditionally increment the user count ... this is
-	 * the number of clients who _want_ a lockd process.
-	 */
-	nlmsvc_users++; 
-	/*
 	 * Check whether we're already up and running.
 	 */
 	if (nlmsvc_pid) {
-		error = make_socks(nlmsvc_serv, proto);
+		if (proto)
+			error = make_socks(nlmsvc_serv, proto);
 		goto out;
 	}
 
@@ -270,7 +266,7 @@ lockd_up(int proto) /* Maybe add a 'fami
 	 * Sanity check: if there's no pid,
 	 * we should be the first user ...
 	 */
-	if (nlmsvc_users > 1)
+	if (nlmsvc_users)
 		printk(KERN_WARNING
 			"lockd_up: no pid, %d users??\n", nlmsvc_users);
 
@@ -302,6 +298,8 @@ lockd_up(int proto) /* Maybe add a 'fami
 destroy_and_out:
 	svc_destroy(serv);
 out:
+	if (!error)
+		nlmsvc_users++;
 	mutex_unlock(&nlmsvc_mutex);
 	return error;
 }

diff .prev/fs/nfsd/nfssvc.c ./fs/nfsd/nfssvc.c
--- .prev/fs/nfsd/nfssvc.c	2006-07-28 14:53:28.000000000 +1000
+++ ./fs/nfsd/nfssvc.c	2006-07-28 15:01:38.000000000 +1000
@@ -221,18 +221,22 @@ static int nfsd_init_socks(int port)
 	if (!list_empty(&nfsd_serv->sv_permsocks))
 		return 0;
 
-	error = svc_makesock(nfsd_serv, IPPROTO_UDP, port);
-	if (error < 0)
-		return error;
 	error = lockd_up(IPPROTO_UDP);
+	if (error >= 0) {
+		error = svc_makesock(nfsd_serv, IPPROTO_UDP, port);
+		if (error < 0)
+			lockd_down();
+	}
 	if (error < 0)
 		return error;
 
 #ifdef CONFIG_NFSD_TCP
-	error = svc_makesock(nfsd_serv, IPPROTO_TCP, port);
-	if (error < 0)
-		return error;
 	error = lockd_up(IPPROTO_TCP);
+	if (error >= 0) {
+		error = svc_makesock(nfsd_serv, IPPROTO_TCP, port);
+		if (error < 0)
+			lockd_down();
+	}
 	if (error < 0)
 		return error;
 #endif

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 000 of 4] knfsd: Introduction
  2006-07-28  5:09 [PATCH 000 of 4] knfsd: Introduction NeilBrown
                   ` (3 preceding siblings ...)
  2006-07-28  5:10 ` [PATCH 004 of 4] knfsd: Correctly handle error condition from lockd_up NeilBrown
@ 2006-07-28 21:10 ` J. Bruce Fields
  2006-08-03  1:22   ` [NFS] " Neil Brown
  4 siblings, 1 reply; 7+ messages in thread
From: J. Bruce Fields @ 2006-07-28 21:10 UTC (permalink / raw)
  To: NeilBrown; +Cc: Andrew Morton, nfs, linux-kernel

On Fri, Jul 28, 2006 at 03:09:40PM +1000, NeilBrown wrote:
> Following are 4 patches for knfsd in 2.6-mm-latest.  They address some
> issues found by Bruce Fields greatly appreciated patch review.  Thanks Bruce.
> They (like the patches they build on) are *not* 2.6.18 material.

By the way, the one thing that looked to me like a real bug was the
failure to do a lockd_down() when the user deletes a socket (comments
resent below), which these patches don't seem to deal with.  Of course,
it's entirely possible I just didn't understand something....

--b.


On Tue, Jul 25, 2006 at 11:55:08AM +1000, NeilBrown wrote:
> +		err = nfsd_create_serv();
> +		if (!err) {
> +			int proto = 0;
> +			err = svc_addsock(nfsd_serv, fd, buf, &proto);
> +			/* Decrease the count, but don't shutdown the
> +			 * the service
> +			 */
> +			if (err >= 0)
> +				lockd_up(proto);
> +			nfsd_serv->sv_nrthreads--;
....
> @@ -211,8 +211,6 @@ static inline int nfsd_create_serv(void)
>  			       nfsd_last_thread);
>  	if (nfsd_serv == NULL)
>  		err = -ENOMEM;
> -	else
> -		nfsd_serv->sv_nrthreads++;

I don't understand these sv_nrthreads changes.

> @@ -449,18 +450,23 @@ int one_sock_name(char *buf, struct svc_
>  }
>  
>  int
> -svc_sock_names(char *buf, struct svc_serv *serv)
> +svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
>  {
> -	struct svc_sock *svsk;
> +	struct svc_sock *svsk, *closesk = NULL;
>  	int len = 0;
>  
>  	if (!serv) return 0;
>  	spin_lock(&serv->sv_lock);
>  	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
>  		int onelen = one_sock_name(buf+len, svsk);
> -		len += onelen;
> +		if (toclose && strcmp(toclose, buf+len) == 0)
> +			closesk = svsk;
> +		else
> +			len += onelen;
>  	}
>  	spin_unlock(&serv->sv_lock);
> +	if (closesk)
> +		svc_delete_socket(closesk);

Am I missing something, or do we end up missing a lockd_down() in this
case?  (Because nfsd_last_thread() isn't going to be calling
lockd_down() for this thread now that we've removed it from
sv_permsocks).

--b.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [NFS] [PATCH 000 of 4] knfsd: Introduction
  2006-07-28 21:10 ` [PATCH 000 of 4] knfsd: Introduction J. Bruce Fields
@ 2006-08-03  1:22   ` Neil Brown
  0 siblings, 0 replies; 7+ messages in thread
From: Neil Brown @ 2006-08-03  1:22 UTC (permalink / raw)
  To: J. Bruce Fields; +Cc: Andrew Morton, nfs, linux-kernel

On Friday July 28, bfields@fieldses.org wrote:
> On Fri, Jul 28, 2006 at 03:09:40PM +1000, NeilBrown wrote:
> > Following are 4 patches for knfsd in 2.6-mm-latest.  They address some
> > issues found by Bruce Fields greatly appreciated patch review.  Thanks Bruce.
> > They (like the patches they build on) are *not* 2.6.18 material.
> 
> By the way, the one thing that looked to me like a real bug was the
> failure to do a lockd_down() when the user deletes a socket (comments
> resent below), which these patches don't seem to deal with.  Of course,
> it's entirely possible I just didn't understand something....
> 

Of course, it is also entirely possible that you understand perfectly,
and that seems to be the case here.  If the svc_sock_names call in
write_ports returns success, we should do a 'lockd_down' - and make
sure it does return success only if the close succeeded.


> --b.
> 
> 
> On Tue, Jul 25, 2006 at 11:55:08AM +1000, NeilBrown wrote:
> > +		err = nfsd_create_serv();
> > +		if (!err) {
> > +			int proto = 0;
> > +			err = svc_addsock(nfsd_serv, fd, buf, &proto);
> > +			/* Decrease the count, but don't shutdown the
> > +			 * the service
> > +			 */
> > +			if (err >= 0)
> > +				lockd_up(proto);
> > +			nfsd_serv->sv_nrthreads--;
> ....
> > @@ -211,8 +211,6 @@ static inline int nfsd_create_serv(void)
> >  			       nfsd_last_thread);
> >  	if (nfsd_serv == NULL)
> >  		err = -ENOMEM;
> > -	else
> > -		nfsd_serv->sv_nrthreads++;
> 
> I don't understand these sv_nrthreads changes.
> 

The first is simply to counter the reference that was gained in
nfsd_create_serv. 

The second chunk (removing sv_nrthread++) is fixing a bug in an
earlier patch.  We never should have had that ++ there, as
'svc_create' created the object with a reference already.

Thanks,
NeilBrown

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2006-08-03  1:22 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2006-07-28  5:09 [PATCH 000 of 4] knfsd: Introduction NeilBrown
2006-07-28  5:09 ` [PATCH 001 of 4] knfsd: Drop 'serv' option to svc_recv and svc_process NeilBrown
2006-07-28  5:09 ` [PATCH 002 of 4] knfsd: Check return value of lockd_up in write_ports NeilBrown
2006-07-28  5:09 ` [PATCH 003 of 4] knfsd: Move makesock failed warning into make_socks NeilBrown
2006-07-28  5:10 ` [PATCH 004 of 4] knfsd: Correctly handle error condition from lockd_up NeilBrown
2006-07-28 21:10 ` [PATCH 000 of 4] knfsd: Introduction J. Bruce Fields
2006-08-03  1:22   ` [NFS] " Neil Brown

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox