From: "J. Bruce Fields" <bfields@fieldses.org>
To: Trond Myklebust <trond.myklebust@primarydata.com>
Cc: linux-nfs@vger.kernel.org
Subject: Re: [PATCH 08/10] SUNRPC: Add a server side per-connection limit
Date: Wed, 6 Jul 2016 12:16:43 -0400	[thread overview]
Message-ID: <20160706161643.GE18856@fieldses.org> (raw)
In-Reply-To: <1466780152-7154-8-git-send-email-trond.myklebust@primarydata.com>

The patch looks good; just a couple of notes to myself:

On Fri, Jun 24, 2016 at 10:55:50AM -0400, Trond Myklebust wrote:
> Allow the user to limit the number of requests serviced through a single
> connection.
> 
> Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
> ---
>  Documentation/kernel-parameters.txt |  6 ++++++
>  include/linux/sunrpc/svc.h          |  1 +
>  include/linux/sunrpc/svc_xprt.h     |  1 +
>  net/sunrpc/svc_xprt.c               | 39 ++++++++++++++++++++++++++++++++++---
>  4 files changed, 44 insertions(+), 3 deletions(-)
> 
> diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
> index 82b42c958d1c..48ba6d2e670a 100644
> --- a/Documentation/kernel-parameters.txt
> +++ b/Documentation/kernel-parameters.txt
> @@ -3832,6 +3832,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
>  			using these two parameters to set the minimum and
>  			maximum port values.
>  
> +	sunrpc.svc_rpc_per_connection_limit=
> +			[NFS,SUNRPC]
> +			Limit the number of requests that the server will
> +			process in parallel from a single connection.
> +			The default value is 0 (no limit).
> +

Since every connection belongs to only one network namespace, we could
easily impose a different limit per network namespace, but a module
parameter is necessarily global.

On the other hand, a module parameter may be simpler than the
alternatives, and we can always add a second interface later in the
unlikely event somebody cares about this, so.... OK.
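
Tangent: the knob also appears to be tunable at runtime via
/sys/module/sunrpc/parameters/svc_rpc_per_connection_limit (given the
0644 permissions), not just from the boot command line.  And to make
the per-netns point concrete, here's an untested sketch of what that
variant might look like; it assumes a hypothetical
svc_rpc_per_connection_limit field added to struct sunrpc_net, which
this patch does not add:

	/* Untested sketch only, not part of this patch: a per-netns
	 * limit would mean hanging the value off struct sunrpc_net
	 * (hypothetical new field) and looking it up via xpt_net.
	 */
	static unsigned int svc_connection_limit(struct svc_xprt *xprt)
	{
		struct sunrpc_net *sn = net_generic(xprt->xpt_net,
						    sunrpc_net_id);

		return sn->svc_rpc_per_connection_limit;
	}

	static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
	{
		unsigned int limit = svc_connection_limit(xprt);
		int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);

		return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
	}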

>  	sunrpc.pool_mode=
>  			[NFS]
>  			Control how the NFS server code allocates CPUs to
> diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
> index 7ca44fb5b675..7321ae933867 100644
> --- a/include/linux/sunrpc/svc.h
> +++ b/include/linux/sunrpc/svc.h
> @@ -268,6 +268,7 @@ struct svc_rqst {
>  						 * cache pages */
>  #define	RQ_VICTIM	(5)			/* about to be shut down */
>  #define	RQ_BUSY		(6)			/* request is busy */
> +#define	RQ_DATA		(7)			/* request has data */
>  	unsigned long		rq_flags;	/* flags field */
>  
>  	void *			rq_argp;	/* decoded arguments */
> diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
> index b7dabc4baafd..555c004de51e 100644
> --- a/include/linux/sunrpc/svc_xprt.h
> +++ b/include/linux/sunrpc/svc_xprt.h
> @@ -69,6 +69,7 @@ struct svc_xprt {
>  
>  	struct svc_serv		*xpt_server;	/* service for transport */
>  	atomic_t    	    	xpt_reserved;	/* space on outq that is rsvd */
> +	atomic_t		xpt_nr_rqsts;	/* Number of requests */
>  	struct mutex		xpt_mutex;	/* to serialize sending data */
>  	spinlock_t		xpt_lock;	/* protects sk_deferred
>  						 * and xpt_auth_cache */
> diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
> index eccc61237ca9..cd26d0d9e41f 100644
> --- a/net/sunrpc/svc_xprt.c
> +++ b/net/sunrpc/svc_xprt.c
> @@ -21,6 +21,10 @@
>  
>  #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
>  
> +static unsigned int svc_rpc_per_connection_limit __read_mostly;
> +module_param(svc_rpc_per_connection_limit, uint, 0644);
> +
> +
>  static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
>  static int svc_deferred_recv(struct svc_rqst *rqstp);
>  static struct cache_deferred_req *svc_defer(struct cache_req *req);
> @@ -327,12 +331,41 @@ char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
>  }
>  EXPORT_SYMBOL_GPL(svc_print_addr);
>  
> +static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
> +{
> +	unsigned int limit = svc_rpc_per_connection_limit;
> +	int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);
> +
> +	return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
> +}
> +
> +static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
> +{
> +	if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {
> +		if (!svc_xprt_slots_in_range(xprt))
> +			return false;
> +		atomic_inc(&xprt->xpt_nr_rqsts);
> +		set_bit(RQ_DATA, &rqstp->rq_flags);

The separate test_bit() and set_bit() had me scratching my head for a
moment....  OK, RQ_DATA is only ever touched by the one server thread
that owns this rqstp, so there's no race (and the test_and_clear_bit()
below is just for convenience, I guess; we don't really need the
combined test-and-clear to be atomic).
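
Spelled out, what I mean is just something like the following
(untested); note clear_bit() itself should probably stay atomic, since
other contexts may still set other bits (e.g. RQ_BUSY) in the same
rq_flags word:

	static void svc_xprt_release_slot(struct svc_rqst *rqstp)
	{
		struct svc_xprt	*xprt = rqstp->rq_xprt;

		/* Only this rqstp's own thread sets or clears RQ_DATA,
		 * so the test and the clear need not be one atomic op.
		 */
		if (test_bit(RQ_DATA, &rqstp->rq_flags)) {
			clear_bit(RQ_DATA, &rqstp->rq_flags);
			atomic_dec(&xprt->xpt_nr_rqsts);
			svc_xprt_enqueue(xprt);
		}
	}

Not that it's worth respinning the patch for.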

--b.

> +	}
> +	return true;
> +}
> +
> +static void svc_xprt_release_slot(struct svc_rqst *rqstp)
> +{
> +	struct svc_xprt	*xprt = rqstp->rq_xprt;
> +	if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
> +		atomic_dec(&xprt->xpt_nr_rqsts);
> +		svc_xprt_enqueue(xprt);
> +	}
> +}
> +
>  static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
>  {
>  	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
>  		return true;
>  	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) {
> -		if (xprt->xpt_ops->xpo_has_wspace(xprt))
> +		if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
> +		    svc_xprt_slots_in_range(xprt))
>  			return true;
>  		trace_svc_xprt_no_write_space(xprt);
>  		return false;
> @@ -514,8 +547,8 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
>  
>  	rqstp->rq_res.head[0].iov_len = 0;
>  	svc_reserve(rqstp, 0);
> +	svc_xprt_release_slot(rqstp);
>  	rqstp->rq_xprt = NULL;
> -
>  	svc_xprt_put(xprt);
>  }
>  
> @@ -783,7 +816,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
>  			svc_add_new_temp_xprt(serv, newxpt);
>  		else
>  			module_put(xprt->xpt_class->xcl_owner);
> -	} else {
> +	} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
>  		/* XPT_DATA|XPT_DEFERRED case: */
>  		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
>  			rqstp, rqstp->rq_pool->sp_id, xprt,
> -- 
> 2.7.4

Thread overview: 22+ messages
2016-06-24 14:55 [PATCH 01/10] SUNRPC: Fix tracepoint storage issues with svc_recv and svc_rqst_status Trond Myklebust
2016-06-24 14:55 ` [PATCH 02/10] SUNRPC: Don't allocate a full sockaddr_storage for tracing Trond Myklebust
2016-06-24 14:55   ` [PATCH 03/10] SUNRPC: Add a tracepoint for server socket out-of-space conditions Trond Myklebust
2016-06-24 14:55     ` [PATCH 04/10] SUNRPC: Add tracepoints for dropped and deferred requests Trond Myklebust
2016-06-24 14:55       ` [PATCH 05/10] SUNRPC: lock the socket while detaching it Trond Myklebust
2016-06-24 14:55         ` [PATCH 06/10] SUNRPC: Call the default socket callbacks instead of open coding Trond Myklebust
2016-06-24 14:55           ` [PATCH 07/10] SUNRPC: Micro optimisation for svc_data_ready Trond Myklebust
2016-06-24 14:55             ` [PATCH 08/10] SUNRPC: Add a server side per-connection limit Trond Myklebust
2016-06-24 14:55               ` [PATCH 09/10] SUNRPC: Change TCP socket space reservation Trond Myklebust
2016-06-24 14:55                 ` [PATCH 10/10] SUNRPC: Remove unused callback xpo_adjust_wspace() Trond Myklebust
2016-06-24 21:18                 ` [PATCH 09/10] SUNRPC: Change TCP socket space reservation J. Bruce Fields
2016-06-24 21:31                   ` Trond Myklebust
2016-07-06 20:17                     ` Fields Bruce
2016-07-06 19:53                 ` J. Bruce Fields
2016-07-06 20:11                   ` Trond Myklebust
2016-07-06 20:30                     ` Fields Bruce
2016-06-24 21:15               ` [PATCH 08/10] SUNRPC: Add a server side per-connection limit J. Bruce Fields
2016-06-24 21:24                 ` Trond Myklebust
2016-07-06 16:16               ` J. Bruce Fields [this message]
2016-06-24 21:06         ` [PATCH 05/10] SUNRPC: lock the socket while detaching it J. Bruce Fields
2016-06-24 21:21           ` Trond Myklebust
2016-06-24 18:28 ` [PATCH 01/10] SUNRPC: Fix tracepoint storage issues with svc_recv and svc_rqst_status J. Bruce Fields
