From: "J. Bruce Fields" <bfields@redhat.com>
To: linux-nfs@vger.kernel.org
Cc: "J. Bruce Fields" <bfields@redhat.com>
Subject: [PATCH 12/14] svcrpc: break up svc_recv
Date: Tue, 21 Aug 2012 16:57:30 -0400
Message-ID: <1345582652-18476-13-git-send-email-bfields@redhat.com>
In-Reply-To: <1345582652-18476-1-git-send-email-bfields@redhat.com>

From: "J. Bruce Fields" <bfields@redhat.com>

Matter of taste, I suppose, but svc_recv breaks up naturally into:

	allocate pages and set up the arg buffer
	dequeue (waiting, if necessary, for) the next transport
	do something with that transport

And I find it easier to read when it doesn't go on for pages and pages.
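
In outline, the reorganized svc_recv (a condensed sketch of the code
added below, with the sanity-check printks elided) reads as:

	int svc_recv(struct svc_rqst *rqstp, long timeout)
	{
		struct svc_xprt *xprt;
		int len, err;

		/* allocate pages and set up rq_arg */
		err = svc_alloc_arg(rqstp);
		if (err)
			return err;

		/* dequeue the next transport, waiting up to 'timeout' */
		xprt = svc_get_next_xprt(rqstp, timeout);
		if (IS_ERR(xprt))
			return PTR_ERR(xprt);

		/* close, accept, or receive on that transport */
		len = svc_handle_xprt(rqstp, xprt);
		...
	}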

Signed-off-by: J. Bruce Fields <bfields@redhat.com>
---
 net/sunrpc/svc_xprt.c |  103 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 67 insertions(+), 36 deletions(-)

diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 295e6ed..6ebc9a9 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -568,33 +568,12 @@ static void svc_check_conn_limits(struct svc_serv *serv)
 	}
 }
 
-/*
- * Receive the next request on any transport.  This code is carefully
- * organised not to touch any cachelines in the shared svc_serv
- * structure, only cachelines in the local svc_pool.
- */
-int svc_recv(struct svc_rqst *rqstp, long timeout)
+int svc_alloc_arg(struct svc_rqst *rqstp)
 {
-	struct svc_xprt		*xprt = NULL;
-	struct svc_serv		*serv = rqstp->rq_server;
-	struct svc_pool		*pool = rqstp->rq_pool;
-	int			len, i;
-	int			pages;
-	struct xdr_buf		*arg;
-	DECLARE_WAITQUEUE(wait, current);
-	long			time_left;
-
-	dprintk("svc: server %p waiting for data (to = %ld)\n",
-		rqstp, timeout);
-
-	if (rqstp->rq_xprt)
-		printk(KERN_ERR
-			"svc_recv: service %p, transport not NULL!\n",
-			 rqstp);
-	if (waitqueue_active(&rqstp->rq_wait))
-		printk(KERN_ERR
-			"svc_recv: service %p, wait queue active!\n",
-			 rqstp);
+	struct svc_serv *serv = rqstp->rq_server;
+	struct xdr_buf *arg;
+	int pages;
+	int i;
 
 	/* now allocate needed pages.  If we get a failure, sleep briefly */
 	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
@@ -624,11 +603,15 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	arg->page_len = (pages-2)*PAGE_SIZE;
 	arg->len = (pages-1)*PAGE_SIZE;
 	arg->tail[0].iov_len = 0;
+	return 0;
+}
 
-	try_to_freeze();
-	cond_resched();
-	if (signalled() || kthread_should_stop())
-		return -EINTR;
+struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
+{
+	struct svc_xprt *xprt;
+	struct svc_pool		*pool = rqstp->rq_pool;
+	DECLARE_WAITQUEUE(wait, current);
+	long			time_left;
 
 	/* Normally we will wait up to 5 seconds for any required
 	 * cache information to be provided.
@@ -666,7 +649,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		if (kthread_should_stop()) {
 			set_current_state(TASK_RUNNING);
 			spin_unlock_bh(&pool->sp_lock);
-			return -EINTR;
+			return ERR_PTR(-EINTR);
 		}
 
 		add_wait_queue(&rqstp->rq_wait, &wait);
@@ -687,19 +670,25 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 			spin_unlock_bh(&pool->sp_lock);
 			dprintk("svc: server %p, no data yet\n", rqstp);
 			if (signalled() || kthread_should_stop())
-				return -EINTR;
+				return ERR_PTR(-EINTR);
 			else
-				return -EAGAIN;
+				return ERR_PTR(-EAGAIN);
 		}
 	}
 	spin_unlock_bh(&pool->sp_lock);
+	return xprt;
+}
+
+static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
+{
+	struct svc_serv *serv = rqstp->rq_server;
+	int len = 0;
 
-	len = 0;
 	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
 		dprintk("svc_recv: found XPT_CLOSE\n");
 		svc_delete_xprt(xprt);
 		/* Leave XPT_BUSY set on the dead xprt: */
-		goto out;
+		return 0;
 	}
 	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
 		struct svc_xprt *newxpt;
@@ -727,8 +716,9 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 			svc_xprt_received(newxpt);
 		}
 	} else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
+		/* XPT_DATA|XPT_DEFERRED case: */
 		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
-			rqstp, pool->sp_id, xprt,
+			rqstp, rqstp->rq_pool->sp_id, xprt,
 			atomic_read(&xprt->xpt_ref.refcount));
 		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
 		if (rqstp->rq_deferred)
@@ -739,7 +729,48 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 	}
+	/* clear XPT_BUSY: */
 	svc_xprt_received(xprt);
+	return len;
+}
+
+/*
+ * Receive the next request on any transport.  This code is carefully
+ * organised not to touch any cachelines in the shared svc_serv
+ * structure, only cachelines in the local svc_pool.
+ */
+int svc_recv(struct svc_rqst *rqstp, long timeout)
+{
+	struct svc_xprt		*xprt = NULL;
+	struct svc_serv		*serv = rqstp->rq_server;
+	int			len, err;
+
+	dprintk("svc: server %p waiting for data (to = %ld)\n",
+		rqstp, timeout);
+
+	if (rqstp->rq_xprt)
+		printk(KERN_ERR
+			"svc_recv: service %p, transport not NULL!\n",
+			 rqstp);
+	if (waitqueue_active(&rqstp->rq_wait))
+		printk(KERN_ERR
+			"svc_recv: service %p, wait queue active!\n",
+			 rqstp);
+
+	err = svc_alloc_arg(rqstp);
+	if (err)
+		return err;
+
+	try_to_freeze();
+	cond_resched();
+	if (signalled() || kthread_should_stop())
+		return -EINTR;
+
+	xprt = svc_get_next_xprt(rqstp, timeout);
+	if (IS_ERR(xprt))
+		return PTR_ERR(xprt);
+
+	len = svc_handle_xprt(rqstp, xprt);
 
 	/* No data, incomplete (TCP) read, or accept() */
 	if (len <= 0)
-- 
1.7.9.5

