Linux NFS development
 help / color / mirror / Atom feed
From: Chuck Lever <chuck.lever@oracle.com>
To: linux-rdma@vger.kernel.org, linux-nfs@vger.kernel.org
Subject: [PATCH v2 3/7] svcrdma: Define maximum number of backchannel requests
Date: Mon, 30 Nov 2015 17:24:49 -0500	[thread overview]
Message-ID: <20151130222449.13029.35573.stgit@klimt.1015granger.net> (raw)
In-Reply-To: <20151130222141.13029.98664.stgit@klimt.1015granger.net>

Extra resources for handling backchannel requests have to be
pre-allocated when a transport instance is created. Set a limit.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 include/linux/sunrpc/svc_rdma.h          |    2 ++
 net/sunrpc/xprtrdma/svc_rdma_transport.c |   14 +++++++++-----
 2 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index cc69551..c189fbd 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -137,6 +137,7 @@ struct svcxprt_rdma {
 
 	int                  sc_max_requests;	/* Depth of RQ */
 	int                  sc_max_req_size;	/* Size of each RQ WR buf */
+	int                  sc_max_bc_requests;
 
 	struct ib_pd         *sc_pd;
 
@@ -178,6 +179,7 @@ struct svcxprt_rdma {
 #define RPCRDMA_SQ_DEPTH_MULT   8
 #define RPCRDMA_MAX_REQUESTS    32
 #define RPCRDMA_MAX_REQ_SIZE    4096
+#define RPCRDMA_MAX_BC_REQUESTS	2
 
 #define RPCSVC_MAXPAYLOAD_RDMA	RPCSVC_MAXPAYLOAD
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 94b8d4c..643402e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -541,6 +541,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
 
 	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
 	cma_xprt->sc_max_requests = svcrdma_max_requests;
+	cma_xprt->sc_max_bc_requests = RPCRDMA_MAX_BC_REQUESTS;
 	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
 	atomic_set(&cma_xprt->sc_sq_count, 0);
 	atomic_set(&cma_xprt->sc_ctxt_used, 0);
@@ -897,6 +898,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	struct ib_device_attr devattr;
 	int uninitialized_var(dma_mr_acc);
 	int need_dma_mr = 0;
+	int total_reqs;
 	int ret;
 	int i;
 
@@ -932,8 +934,10 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	newxprt->sc_max_sge_rd = min_t(size_t, devattr.max_sge_rd,
 				       RPCSVC_MAXPAGES);
 	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
-				   (size_t)svcrdma_max_requests);
-	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;
+				       (size_t)svcrdma_max_requests);
+	newxprt->sc_max_bc_requests = RPCRDMA_MAX_BC_REQUESTS;
+	total_reqs = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
+	newxprt->sc_sq_depth = total_reqs * RPCRDMA_SQ_DEPTH_MULT;
 
 	/*
 	 * Limit ORD based on client limit, local device limit, and
@@ -957,7 +961,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		dprintk("svcrdma: error creating SQ CQ for connect request\n");
 		goto errout;
 	}
-	cq_attr.cqe = newxprt->sc_max_requests;
+	cq_attr.cqe = total_reqs;
 	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
 					 rq_comp_handler,
 					 cq_event_handler,
@@ -972,7 +976,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	qp_attr.event_handler = qp_event_handler;
 	qp_attr.qp_context = &newxprt->sc_xprt;
 	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
-	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
+	qp_attr.cap.max_recv_wr = total_reqs;
 	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
 	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
 	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
@@ -1068,7 +1072,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 			newxprt->sc_cm_id->device->local_dma_lkey;
 
 	/* Post receive buffers */
-	for (i = 0; i < newxprt->sc_max_requests; i++) {
+	for (i = 0; i < total_reqs; i++) {
 		ret = svc_rdma_post_recv(newxprt);
 		if (ret) {
 			dprintk("svcrdma: failure posting receive buffers\n");


  parent reply	other threads:[~2015-11-30 22:24 UTC|newest]

Thread overview: 10+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-11-30 22:24 [PATCH v2 0/7] NFS/RDMA server patches for 4.5 Chuck Lever
2015-11-30 22:24 ` [PATCH v2 1/7] svcrdma: Do not send XDR roundup bytes for a write chunk Chuck Lever
2015-11-30 22:24 ` [PATCH v2 2/7] svcrdma: Add svc_rdma_get_context() API that is allowed to fail Chuck Lever
2015-11-30 22:24 ` Chuck Lever [this message]
2015-11-30 22:24 ` [PATCH v2 4/7] svcrdma: Add infrastructure to send backwards direction RPC/RDMA calls Chuck Lever
2015-11-30 22:25 ` [PATCH v2 5/7] svcrdma: Add infrastructure to receive backwards direction RPC/RDMA replies Chuck Lever
2015-11-30 22:25 ` [PATCH v2 6/7] xprtrdma: Add class for RDMA backwards direction transport Chuck Lever
2015-12-01 13:38   ` Tom Talpey
2015-12-01 14:36     ` Chuck Lever
2015-11-30 22:25 ` [PATCH v2 7/7] svcrdma: No need to count WRs in svc_rdma_send() Chuck Lever

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20151130222449.13029.35573.stgit@klimt.1015granger.net \
    --to=chuck.lever@oracle.com \
    --cc=linux-nfs@vger.kernel.org \
    --cc=linux-rdma@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox