From: Chuck Lever <chuck.lever@oracle.com>
To: Anna.Schumaker@netapp.com
Cc: linux-rdma@vger.kernel.org, linux-nfs@vger.kernel.org
Subject: [PATCH v4 10/21] xprtrdma: Properly handle exhaustion of the rb_mws list
Date: Tue, 22 Jul 2014 10:52:25 -0400
Message-ID: <20140722145225.6010.88602.stgit@manet.1015granger.net>
In-Reply-To: <20140722144459.6010.99389.stgit@manet.1015granger.net>
If the rb_mws list is exhausted, rpcrdma_buffer_get() now cleans up
and returns NULL so that call_allocate() will delay and try again.
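
For context (not part of this patch): rpcrdma_buffer_get() is reached
via xprt_rdma_allocate(), which already returns NULL to the generic
RPC layer when no request buffer is available. A paraphrased sketch of
how the client reacts, based on net/sunrpc/clnt.c from this era
(details elided), looks like:

static void
call_allocate(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	...
	req->rq_buffer = xprt->ops->buf_alloc(task,
				req->rq_callsize + req->rq_rcvsize);
	if (req->rq_buffer != NULL)
		return;

	/* No buffers available: back off and retry this state later */
	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ >> 4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

Exhaustion of rb_mws now feeds into that same delay-and-retry path
instead of leaving the req partially provisioned.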
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
---
net/sunrpc/xprtrdma/verbs.c | 103 ++++++++++++++++++++++++++++++-------------
1 file changed, 71 insertions(+), 32 deletions(-)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index abac484..cdea6ff 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1256,6 +1256,67 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 	kfree(buf->rb_pool);
 }
 
+/* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving
+ * some req segments uninitialized.
+ */
+static void
+rpcrdma_buffer_put_mr(struct rpcrdma_mw **mw, struct rpcrdma_buffer *buf)
+{
+	if (*mw) {
+		list_add_tail(&(*mw)->mw_list, &buf->rb_mws);
+		*mw = NULL;
+	}
+}
+
+/* Cycle mw's back in reverse order, and "spin" them.
+ * This delays and scrambles reuse as much as possible.
+ */
+static void
+rpcrdma_buffer_put_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
+{
+	struct rpcrdma_mr_seg *seg = req->rl_segments;
+	struct rpcrdma_mr_seg *seg1 = seg;
+	int i;
+
+	for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++)
+		rpcrdma_buffer_put_mr(&seg->mr_chunk.rl_mw, buf);
+	rpcrdma_buffer_put_mr(&seg1->mr_chunk.rl_mw, buf);
+}
+
+static void
+rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
+{
+	buf->rb_send_bufs[--buf->rb_send_index] = req;
+	req->rl_niovs = 0;
+	if (req->rl_reply) {
+		buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
+		req->rl_reply->rr_func = NULL;
+		req->rl_reply = NULL;
+	}
+}
+
+static struct rpcrdma_req *
+rpcrdma_buffer_get_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
+{
+	struct rpcrdma_mw *r;
+	int i;
+
+	i = RPCRDMA_MAX_SEGS - 1;
+	while (!list_empty(&buf->rb_mws)) {
+		r = list_entry(buf->rb_mws.next,
+			       struct rpcrdma_mw, mw_list);
+		list_del(&r->mw_list);
+		req->rl_segments[i].mr_chunk.rl_mw = r;
+		if (unlikely(i-- == 0))
+			return req;	/* Success */
+	}
+
+	/* Not enough entries on rb_mws for this req */
+	rpcrdma_buffer_put_sendbuf(req, buf);
+	rpcrdma_buffer_put_mrs(req, buf);
+	return NULL;
+}
+
 /*
  * Get a set of request/reply buffers.
  *
@@ -1268,10 +1329,9 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 struct rpcrdma_req *
 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 {
+	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
 	struct rpcrdma_req *req;
 	unsigned long flags;
-	int i;
-	struct rpcrdma_mw *r;
 
 	spin_lock_irqsave(&buffers->rb_lock, flags);
 	if (buffers->rb_send_index == buffers->rb_max_requests) {
@@ -1291,14 +1351,13 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
 	}
 	buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;
-	if (!list_empty(&buffers->rb_mws)) {
-		i = RPCRDMA_MAX_SEGS - 1;
-		do {
-			r = list_entry(buffers->rb_mws.next,
-					struct rpcrdma_mw, mw_list);
-			list_del(&r->mw_list);
-			req->rl_segments[i].mr_chunk.rl_mw = r;
-		} while (--i >= 0);
+	switch (ia->ri_memreg_strategy) {
+	case RPCRDMA_FRMR:
+	case RPCRDMA_MTHCAFMR:
+		req = rpcrdma_buffer_get_mrs(req, buffers);
+		break;
+	default:
+		break;
 	}
 	spin_unlock_irqrestore(&buffers->rb_lock, flags);
 	return req;
@@ -1313,34 +1372,14 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
 {
 	struct rpcrdma_buffer *buffers = req->rl_buffer;
 	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
-	int i;
 	unsigned long flags;
 
 	spin_lock_irqsave(&buffers->rb_lock, flags);
-	buffers->rb_send_bufs[--buffers->rb_send_index] = req;
-	req->rl_niovs = 0;
-	if (req->rl_reply) {
-		buffers->rb_recv_bufs[--buffers->rb_recv_index] = req->rl_reply;
-		req->rl_reply->rr_func = NULL;
-		req->rl_reply = NULL;
-	}
+	rpcrdma_buffer_put_sendbuf(req, buffers);
 	switch (ia->ri_memreg_strategy) {
 	case RPCRDMA_FRMR:
 	case RPCRDMA_MTHCAFMR:
-		/*
-		 * Cycle mw's back in reverse order, and "spin" them.
-		 * This delays and scrambles reuse as much as possible.
-		 */
-		i = 1;
-		do {
-			struct rpcrdma_mw **mw;
-			mw = &req->rl_segments[i].mr_chunk.rl_mw;
-			list_add_tail(&(*mw)->mw_list, &buffers->rb_mws);
-			*mw = NULL;
-		} while (++i < RPCRDMA_MAX_SEGS);
-		list_add_tail(&req->rl_segments[0].mr_chunk.rl_mw->mw_list,
-				&buffers->rb_mws);
-		req->rl_segments[0].mr_chunk.rl_mw = NULL;
+		rpcrdma_buffer_put_mrs(req, buffers);
 		break;
 	default:
 		break;
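
A note on the "spin" comment carried into rpcrdma_buffer_put_mrs():
rb_mws is consumed from the head (rb_mws.next) and replenished at the
tail (list_add_tail), so the free list behaves as a FIFO and a
just-released MW queues behind every MW that is already free. A
minimal userspace demo of that recycling order (hypothetical
illustration only, not kernel code):

#include <stdio.h>

#define POOL_SIZE 4

struct mw {
	int id;
	struct mw *next;
};

static struct mw pool[POOL_SIZE];
static struct mw *head, *tail;

/* analogue of list_add_tail(&mw->mw_list, &buf->rb_mws) */
static void put_tail(struct mw *m)
{
	m->next = NULL;
	if (tail)
		tail->next = m;
	else
		head = m;
	tail = m;
}

/* analogue of list_entry(buf->rb_mws.next, ...) plus list_del() */
static struct mw *get_head(void)
{
	struct mw *m = head;

	if (m) {
		head = m->next;
		if (!head)
			tail = NULL;
	}
	return m;
}

int main(void)
{
	struct mw *m;
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		pool[i].id = i;
		put_tail(&pool[i]);
	}

	m = get_head();		/* use MW 0 ... */
	put_tail(m);		/* ... then release it to the tail */

	while ((m = get_head()) != NULL)
		printf("next MW handed out: %d\n", m->id);

	/* Prints 1, 2, 3, then 0: reuse of MW 0 is maximally delayed */
	return 0;
}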