public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 000 of 2] knfsd: Close oopsable race in nfsd
@ 2007-04-17  2:01 NeilBrown
  2007-04-17  2:01 ` [PATCH 001 of 2] knfsd: Use a spinlock to protect sk_info_authunix NeilBrown
  2007-04-17  2:01 ` [PATCH 002 of 2] knfsd: Rename sk_defer_lock to sk_lock NeilBrown
  0 siblings, 2 replies; 3+ messages in thread
From: NeilBrown @ 2007-04-17  2:01 UTC (permalink / raw)
  To: Andrew Morton; +Cc: nfs, linux-kernel, Greg Banks, stable

The following two patches fix a bug introduced in 
   7b2b1fee30df7e2165525cd03f7d1d01a3a56794
and hence present in 2.6.19 and later.
The first patch is a minimal fix which is suitable for all kernels 
since 2.6.19-pre1.  The second adds some consequent cleaning up
and is probably best left for 2.6.22-rc (and so is not being cc:ed 
to stable@kernel.org).

Thanks,
NeilBrown


 [PATCH 001 of 2] knfsd: Use a spinlock to protect sk_info_authunix
 [PATCH 002 of 2] knfsd: Rename sk_defer_lock to sk_lock

^ permalink raw reply	[flat|nested] 3+ messages in thread

* [PATCH 001 of 2] knfsd: Use a spinlock to protect sk_info_authunix
  2007-04-17  2:01 [PATCH 000 of 2] knfsd: Close oopsable race in nfsd NeilBrown
@ 2007-04-17  2:01 ` NeilBrown
  2007-04-17  2:01 ` [PATCH 002 of 2] knfsd: Rename sk_defer_lock to sk_lock NeilBrown
  1 sibling, 0 replies; 3+ messages in thread
From: NeilBrown @ 2007-04-17  2:01 UTC (permalink / raw)
  To: Andrew Morton; +Cc: nfs, linux-kernel, Greg Banks, Gabriel Barazer, stable


sk_info_authunix is not being protected properly so the object that
it points to can be cache_put twice, leading to corruption.

We borrow svsk->sk_defer_lock to provide the protection.  We should probably
rename that lock to have a more generic name - later.

Thanks to Gabriel for reporting this.

Cc: Greg Banks <gnb@melbourne.sgi.com>
Cc: Gabriel Barazer <gabriel@oxeva.fr>
Signed-off-by: Neil Brown <neilb@suse.de>

### Diffstat output
 ./net/sunrpc/svcauth_unix.c |   21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff .prev/net/sunrpc/svcauth_unix.c ./net/sunrpc/svcauth_unix.c
--- .prev/net/sunrpc/svcauth_unix.c	2007-04-17 11:42:14.000000000 +1000
+++ ./net/sunrpc/svcauth_unix.c	2007-04-17 11:42:21.000000000 +1000
@@ -383,7 +383,10 @@ void svcauth_unix_purge(void)
 static inline struct ip_map *
 ip_map_cached_get(struct svc_rqst *rqstp)
 {
-	struct ip_map *ipm = rqstp->rq_sock->sk_info_authunix;
+	struct ip_map *ipm;
+	struct svc_sock *svsk = rqstp->rq_sock;
+	spin_lock_bh(&svsk->sk_defer_lock);
+	ipm = svsk->sk_info_authunix;
 	if (ipm != NULL) {
 		if (!cache_valid(&ipm->h)) {
 			/*
@@ -391,12 +394,14 @@ ip_map_cached_get(struct svc_rqst *rqstp
 			 * remembered, e.g. by a second mount from the
 			 * same IP address.
 			 */
-			rqstp->rq_sock->sk_info_authunix = NULL;
+			svsk->sk_info_authunix = NULL;
+			spin_unlock_bh(&svsk->sk_defer_lock);
 			cache_put(&ipm->h, &ip_map_cache);
 			return NULL;
 		}
 		cache_get(&ipm->h);
 	}
+	spin_unlock_bh(&svsk->sk_defer_lock);
 	return ipm;
 }
 
@@ -405,9 +410,15 @@ ip_map_cached_put(struct svc_rqst *rqstp
 {
 	struct svc_sock *svsk = rqstp->rq_sock;
 
-	if (svsk->sk_sock->type == SOCK_STREAM && svsk->sk_info_authunix == NULL)
-		svsk->sk_info_authunix = ipm;	/* newly cached, keep the reference */
-	else
+	spin_lock_bh(&svsk->sk_defer_lock);
+	if (svsk->sk_sock->type == SOCK_STREAM &&
+	    svsk->sk_info_authunix == NULL) {
+		/* newly cached, keep the reference */
+		svsk->sk_info_authunix = ipm;
+		ipm = NULL;
+	}
+	spin_unlock_bh(&svsk->sk_defer_lock);
+	if (ipm)
 		cache_put(&ipm->h, &ip_map_cache);
 }
 

^ permalink raw reply	[flat|nested] 3+ messages in thread

* [PATCH 002 of 2] knfsd: Rename sk_defer_lock to sk_lock
  2007-04-17  2:01 [PATCH 000 of 2] knfsd: Close oopsable race in nfsd NeilBrown
  2007-04-17  2:01 ` [PATCH 001 of 2] knfsd: Use a spinlock to protect sk_info_authunix NeilBrown
@ 2007-04-17  2:01 ` NeilBrown
  1 sibling, 0 replies; 3+ messages in thread
From: NeilBrown @ 2007-04-17  2:01 UTC (permalink / raw)
  To: Andrew Morton; +Cc: nfs, linux-kernel, Greg Banks


Now that sk_defer_lock protects two different things, make the
name more generic.

Also don't bother with disabling _bh as the lock is only
ever taken from process context.

Signed-off-by: Neil Brown <neilb@suse.de>

### Diffstat output
 ./include/linux/sunrpc/svcsock.h |    3 ++-
 ./net/sunrpc/svcauth_unix.c      |   10 +++++-----
 ./net/sunrpc/svcsock.c           |   13 +++++++------
 3 files changed, 14 insertions(+), 12 deletions(-)

diff .prev/include/linux/sunrpc/svcsock.h ./include/linux/sunrpc/svcsock.h
--- .prev/include/linux/sunrpc/svcsock.h	2007-04-17 11:42:13.000000000 +1000
+++ ./include/linux/sunrpc/svcsock.h	2007-04-17 11:42:26.000000000 +1000
@@ -37,7 +37,8 @@ struct svc_sock {
 
 	atomic_t    	    	sk_reserved;	/* space on outq that is reserved */
 
-	spinlock_t		sk_defer_lock;	/* protects sk_deferred */
+	spinlock_t		sk_lock;	/* protects sk_deferred and
+						 * sk_info_authunix */
 	struct list_head	sk_deferred;	/* deferred requests that need to
 						 * be revisted */
 	struct mutex		sk_mutex;	/* to serialize sending data */

diff .prev/net/sunrpc/svcauth_unix.c ./net/sunrpc/svcauth_unix.c
--- .prev/net/sunrpc/svcauth_unix.c	2007-04-17 11:42:21.000000000 +1000
+++ ./net/sunrpc/svcauth_unix.c	2007-04-17 11:42:26.000000000 +1000
@@ -385,7 +385,7 @@ ip_map_cached_get(struct svc_rqst *rqstp
 {
 	struct ip_map *ipm;
 	struct svc_sock *svsk = rqstp->rq_sock;
-	spin_lock_bh(&svsk->sk_defer_lock);
+	spin_lock(&svsk->sk_lock);
 	ipm = svsk->sk_info_authunix;
 	if (ipm != NULL) {
 		if (!cache_valid(&ipm->h)) {
@@ -395,13 +395,13 @@ ip_map_cached_get(struct svc_rqst *rqstp
 			 * same IP address.
 			 */
 			svsk->sk_info_authunix = NULL;
-			spin_unlock_bh(&svsk->sk_defer_lock);
+			spin_unlock(&svsk->sk_lock);
 			cache_put(&ipm->h, &ip_map_cache);
 			return NULL;
 		}
 		cache_get(&ipm->h);
 	}
-	spin_unlock_bh(&svsk->sk_defer_lock);
+	spin_unlock(&svsk->sk_lock);
 	return ipm;
 }
 
@@ -410,14 +410,14 @@ ip_map_cached_put(struct svc_rqst *rqstp
 {
 	struct svc_sock *svsk = rqstp->rq_sock;
 
-	spin_lock_bh(&svsk->sk_defer_lock);
+	spin_lock(&svsk->sk_lock);
 	if (svsk->sk_sock->type == SOCK_STREAM &&
 	    svsk->sk_info_authunix == NULL) {
 		/* newly cached, keep the reference */
 		svsk->sk_info_authunix = ipm;
 		ipm = NULL;
 	}
-	spin_unlock_bh(&svsk->sk_defer_lock);
+	spin_unlock(&svsk->sk_lock);
 	if (ipm)
 		cache_put(&ipm->h, &ip_map_cache);
 }

diff .prev/net/sunrpc/svcsock.c ./net/sunrpc/svcsock.c
--- .prev/net/sunrpc/svcsock.c	2007-04-17 11:42:13.000000000 +1000
+++ ./net/sunrpc/svcsock.c	2007-04-17 11:42:26.000000000 +1000
@@ -53,7 +53,8 @@
  * 	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
  *	when both need to be taken (rare), svc_serv->sv_lock is first.
  *	BKL protects svc_serv->sv_nrthread.
- *	svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list
+ *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
+ *             and the ->sk_info_authunix cache.
  *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
  *
  *	Some flags can be set to certain values at any time
@@ -1625,7 +1626,7 @@ static struct svc_sock *svc_setup_socket
 	svsk->sk_server = serv;
 	atomic_set(&svsk->sk_inuse, 1);
 	svsk->sk_lastrecv = get_seconds();
-	spin_lock_init(&svsk->sk_defer_lock);
+	spin_lock_init(&svsk->sk_lock);
 	INIT_LIST_HEAD(&svsk->sk_deferred);
 	INIT_LIST_HEAD(&svsk->sk_ready);
 	mutex_init(&svsk->sk_mutex);
@@ -1849,9 +1850,9 @@ static void svc_revisit(struct cache_def
 	dprintk("revisit queued\n");
 	svsk = dr->svsk;
 	dr->svsk = NULL;
-	spin_lock_bh(&svsk->sk_defer_lock);
+	spin_lock(&svsk->sk_lock);
 	list_add(&dr->handle.recent, &svsk->sk_deferred);
-	spin_unlock_bh(&svsk->sk_defer_lock);
+	spin_unlock(&svsk->sk_lock);
 	set_bit(SK_DEFERRED, &svsk->sk_flags);
 	svc_sock_enqueue(svsk);
 	svc_sock_put(svsk);
@@ -1917,7 +1918,7 @@ static struct svc_deferred_req *svc_defe
 
 	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
 		return NULL;
-	spin_lock_bh(&svsk->sk_defer_lock);
+	spin_lock(&svsk->sk_lock);
 	clear_bit(SK_DEFERRED, &svsk->sk_flags);
 	if (!list_empty(&svsk->sk_deferred)) {
 		dr = list_entry(svsk->sk_deferred.next,
@@ -1926,6 +1927,6 @@ static struct svc_deferred_req *svc_defe
 		list_del_init(&dr->handle.recent);
 		set_bit(SK_DEFERRED, &svsk->sk_flags);
 	}
-	spin_unlock_bh(&svsk->sk_defer_lock);
+	spin_unlock(&svsk->sk_lock);
 	return dr;
 }

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2007-04-17  2:02 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-04-17  2:01 [PATCH 000 of 2] knfsd: Close oopsable race in nfsd NeilBrown
2007-04-17  2:01 ` [PATCH 001 of 2] knfsd: Use a spinlock to protect sk_info_authunix NeilBrown
2007-04-17  2:01 ` [PATCH 002 of 2] knfsd: Rename sk_defer_lock to sk_lock NeilBrown

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox