From: Chuck Lever <chuck.lever@oracle.com>
To: linux-nfs@vger.kernel.org, linux-rdma@vger.kernel.org
Subject: [PATCH v4 13/24] xprtrdma: Reduce calls to ib_poll_cq() in completion handlers
Date: Wed, 21 May 2014 20:56:08 -0400 [thread overview]
Message-ID: <20140522005608.27190.93035.stgit@manet.1015granger.net> (raw)
In-Reply-To: <20140522004505.27190.58897.stgit@manet.1015granger.net>
Change the completion handlers to grab up to 16 items per
ib_poll_cq() call. No extra ib_poll_cq() is needed if fewer than 16
items are returned.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
net/sunrpc/xprtrdma/verbs.c | 56 ++++++++++++++++++++++++++-------------
net/sunrpc/xprtrdma/xprt_rdma.h | 4 +++
2 files changed, 42 insertions(+), 18 deletions(-)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index c7d5281..b8caee9 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -162,14 +162,23 @@ rpcrdma_sendcq_process_wc(struct ib_wc *wc)
}
static int
-rpcrdma_sendcq_poll(struct ib_cq *cq)
+rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
- struct ib_wc wc;
- int rc;
+ struct ib_wc *wcs;
+ int count, rc;
- while ((rc = ib_poll_cq(cq, 1, &wc)) == 1)
- rpcrdma_sendcq_process_wc(&wc);
- return rc;
+ do {
+ wcs = ep->rep_send_wcs;
+
+ rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
+ if (rc <= 0)
+ return rc;
+
+ count = rc;
+ while (count-- > 0)
+ rpcrdma_sendcq_process_wc(wcs++);
+ } while (rc == RPCRDMA_POLLSIZE);
+ return 0;
}
/*
@@ -183,9 +192,10 @@ rpcrdma_sendcq_poll(struct ib_cq *cq)
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
+ struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
int rc;
- rc = rpcrdma_sendcq_poll(cq);
+ rc = rpcrdma_sendcq_poll(cq, ep);
if (rc) {
dprintk("RPC: %s: ib_poll_cq failed: %i\n",
__func__, rc);
@@ -202,7 +212,7 @@ rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
return;
}
- rpcrdma_sendcq_poll(cq);
+ rpcrdma_sendcq_poll(cq, ep);
}
static void
@@ -241,14 +251,23 @@ out_schedule:
}
static int
-rpcrdma_recvcq_poll(struct ib_cq *cq)
+rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
- struct ib_wc wc;
- int rc;
+ struct ib_wc *wcs;
+ int count, rc;
- while ((rc = ib_poll_cq(cq, 1, &wc)) == 1)
- rpcrdma_recvcq_process_wc(&wc);
- return rc;
+ do {
+ wcs = ep->rep_recv_wcs;
+
+ rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
+ if (rc <= 0)
+ return rc;
+
+ count = rc;
+ while (count-- > 0)
+ rpcrdma_recvcq_process_wc(wcs++);
+ } while (rc == RPCRDMA_POLLSIZE);
+ return 0;
}
/*
@@ -266,9 +285,10 @@ rpcrdma_recvcq_poll(struct ib_cq *cq)
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
+ struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
int rc;
- rc = rpcrdma_recvcq_poll(cq);
+ rc = rpcrdma_recvcq_poll(cq, ep);
if (rc) {
dprintk("RPC: %s: ib_poll_cq failed: %i\n",
__func__, rc);
@@ -285,7 +305,7 @@ rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
return;
}
- rpcrdma_recvcq_poll(cq);
+ rpcrdma_recvcq_poll(cq, ep);
}
#ifdef RPC_DEBUG
@@ -721,7 +741,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
- rpcrdma_cq_async_error_upcall, NULL,
+ rpcrdma_cq_async_error_upcall, ep,
ep->rep_attr.cap.max_send_wr + 1, 0);
if (IS_ERR(sendcq)) {
rc = PTR_ERR(sendcq);
@@ -738,7 +758,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
}
recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
- rpcrdma_cq_async_error_upcall, NULL,
+ rpcrdma_cq_async_error_upcall, ep,
ep->rep_attr.cap.max_recv_wr + 1, 0);
if (IS_ERR(recvcq)) {
rc = PTR_ERR(recvcq);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 334ab6e..cb4c882 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -74,6 +74,8 @@ struct rpcrdma_ia {
* RDMA Endpoint -- one per transport instance
*/
+#define RPCRDMA_POLLSIZE (16)
+
struct rpcrdma_ep {
atomic_t rep_cqcount;
int rep_cqinit;
@@ -88,6 +90,8 @@ struct rpcrdma_ep {
struct rdma_conn_param rep_remote_cma;
struct sockaddr_storage rep_remote_addr;
struct delayed_work rep_connect_worker;
+ struct ib_wc rep_send_wcs[RPCRDMA_POLLSIZE];
+ struct ib_wc rep_recv_wcs[RPCRDMA_POLLSIZE];
};
#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
next prev parent reply other threads:[~2014-05-22 0:56 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-05-22 0:54 [PATCH v4 00/24] NFS/RDMA client patches for next merge Chuck Lever
2014-05-22 0:54 ` [PATCH v4 01/24] xprtrdma: mind the device's max fast register page list depth Chuck Lever
2014-05-22 0:54 ` [PATCH v4 02/24] nfs-rdma: Fix for FMR leaks Chuck Lever
2014-05-22 0:54 ` [PATCH v4 03/24] xprtrdma: RPC/RDMA must invoke xprt_wake_pending_tasks() in process context Chuck Lever
2014-05-22 0:54 ` [PATCH v4 04/24] xprtrdma: Remove BOUNCEBUFFERS memory registration mode Chuck Lever
2014-05-22 0:54 ` [PATCH v4 05/24] xprtrdma: Remove MEMWINDOWS registration modes Chuck Lever
2014-05-22 0:55 ` [PATCH v4 06/24] xprtrdma: Remove REGISTER memory registration mode Chuck Lever
2014-05-22 0:55 ` [PATCH v4 07/24] xprtrdma: Fall back to MTHCAFMR when FRMR is not supported Chuck Lever
2014-05-22 0:55 ` [PATCH v4 08/24] xprtrdma: mount reports "Invalid mount option" if memreg mode " Chuck Lever
2014-05-22 0:55 ` [PATCH v4 09/24] xprtrdma: Simplify rpcrdma_deregister_external() synopsis Chuck Lever
2014-05-22 0:55 ` [PATCH v4 10/24] xprtrdma: Make rpcrdma_ep_destroy() return void Chuck Lever
2014-05-22 0:55 ` [PATCH v4 11/24] xprtrdma: Split the completion queue Chuck Lever
2014-05-22 0:55 ` [PATCH v4 12/24] xprtrdma: Reduce lock contention in completion handlers Chuck Lever
2014-05-22 0:56 ` Chuck Lever [this message]
2014-05-22 0:56 ` [PATCH v4 14/24] xprtrdma: Limit work done by completion handler Chuck Lever
2014-05-22 0:56 ` [PATCH v4 15/24] xprtrdma: Reduce the number of hardway buffer allocations Chuck Lever
2014-05-22 0:56 ` [PATCH v4 16/24] xprtrdma: Ensure ia->ri_id->qp is not NULL when reconnecting Chuck Lever
2014-05-22 0:56 ` [PATCH v4 17/24] xprtrdma: Remove Tavor MTU setting Chuck Lever
2014-05-22 0:56 ` [PATCH v4 18/24] xprtrdma: Allocate missing pagelist Chuck Lever
2014-05-22 0:56 ` [PATCH v4 19/24] xprtrdma: Use macros for reconnection timeout constants Chuck Lever
2014-05-22 0:57 ` [PATCH v4 20/24] xprtrdma: Reset connection timeout after successful reconnect Chuck Lever
2014-05-22 2:07 ` Trond Myklebust
2014-05-22 3:28 ` Chuck Lever
2014-05-22 0:57 ` [PATCH v4 21/24] SUNRPC: Move congestion window constants to header file Chuck Lever
2014-05-22 0:57 ` [PATCH v4 22/24] xprtrdma: Avoid deadlock when credit window is reset Chuck Lever
2014-05-22 0:57 ` [PATCH v4 23/24] xprtrdma: Remove BUG_ON() call sites Chuck Lever
2014-05-22 0:57 ` [PATCH v4 24/24] xprtrdma: Disconnect on registration failure Chuck Lever
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20140522005608.27190.93035.stgit@manet.1015granger.net \
--to=chuck.lever@oracle.com \
--cc=linux-nfs@vger.kernel.org \
--cc=linux-rdma@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).