From: Chuck Lever <chuck.lever@oracle.com>
To: bfields@fieldses.org
Cc: linux-rdma@vger.kernel.org, linux-nfs@vger.kernel.org
Subject: [PATCH v1 08/19] svcrdma: Simplify svc_rdma_recv_ctxt_put
Date: Mon, 07 May 2018 15:27:32 -0400
Message-ID: <20180507192732.4608.7636.stgit@klimt.1015granger.net>
In-Reply-To: <20180507192126.4608.63295.stgit@klimt.1015granger.net>
Currently svc_rdma_recv_ctxt_put's callers have to know whether they
want to free the ctxt's pages or not. This means developers have to
know, at every call site, when and why to set the free_pages
argument.
Instead, the ctxt should carry that information with it so that
svc_rdma_recv_ctxt_put does the right thing no matter who is
calling.
We want to keep track of the number of pages in the Receive buffer
separately from the number of pages pulled over by RDMA Read, so that
the correct number of pages is always released and that accounting is
clearly documented.
So now, rc_hdr_count is the number of pages consumed by head[0]
(i.e., the page index where the Read chunk should start); and
rc_page_count is always the number of pages that need to be released
when the ctxt is put.
The @free_pages argument is no longer needed.
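For illustration, a minimal before/after sketch of one call site
(taken from the hunks below) showing that the caller no longer
decides whether pages are released; rc_page_count does:

	/* Before: caller must say whether rc_pages should be freed */
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 1);

	/* After: put() always releases exactly rc_page_count pages,
	 * whatever the ctxt still owns at this point.
	 */
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);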
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
include/linux/sunrpc/svc_rdma.h | 3 +-
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 41 +++++++++++++++++--------------
net/sunrpc/xprtrdma/svc_rdma_rw.c | 4 ++-
3 files changed, 25 insertions(+), 23 deletions(-)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 3cb6631..f0bd0b6d 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -173,8 +173,7 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
- struct svc_rdma_recv_ctxt *ctxt,
- int free_pages);
+ struct svc_rdma_recv_ctxt *ctxt);
extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
extern int svc_rdma_recvfrom(struct svc_rqst *);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index b7d9c55..ecfe7c9 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -175,18 +175,15 @@ static void svc_rdma_recv_ctxt_unmap(struct svcxprt_rdma *rdma,
* svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
* @rdma: controlling svcxprt_rdma
* @ctxt: object to return to the free list
- * @free_pages: Non-zero if rc_pages should be freed
*
*/
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
- struct svc_rdma_recv_ctxt *ctxt,
- int free_pages)
+ struct svc_rdma_recv_ctxt *ctxt)
{
unsigned int i;
- if (free_pages)
- for (i = 0; i < ctxt->rc_page_count; i++)
- put_page(ctxt->rc_pages[i]);
+ for (i = 0; i < ctxt->rc_page_count; i++)
+ put_page(ctxt->rc_pages[i]);
spin_lock(&rdma->sc_recv_lock);
list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
spin_unlock(&rdma->sc_recv_lock);
@@ -243,11 +240,11 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
err_put_ctxt:
svc_rdma_recv_ctxt_unmap(rdma, ctxt);
- svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
+ svc_rdma_recv_ctxt_put(rdma, ctxt);
return -ENOMEM;
err_post:
svc_rdma_recv_ctxt_unmap(rdma, ctxt);
- svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
+ svc_rdma_recv_ctxt_put(rdma, ctxt);
svc_xprt_put(&rdma->sc_xprt);
return ret;
}
@@ -316,7 +313,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
ib_wc_status_msg(wc->status),
wc->status, wc->vendor_err);
post_err:
- svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
+ svc_rdma_recv_ctxt_put(rdma, ctxt);
set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
svc_xprt_enqueue(&rdma->sc_xprt);
out:
@@ -334,11 +331,11 @@ void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
list_del(&ctxt->rc_list);
- svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
+ svc_rdma_recv_ctxt_put(rdma, ctxt);
}
while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
list_del(&ctxt->rc_list);
- svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
+ svc_rdma_recv_ctxt_put(rdma, ctxt);
}
}
@@ -383,16 +380,19 @@ static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
len -= min_t(u32, len, ctxt->rc_sges[sge_no].length);
sge_no++;
}
+ ctxt->rc_hdr_count = sge_no;
rqstp->rq_respages = &rqstp->rq_pages[sge_no];
rqstp->rq_next_page = rqstp->rq_respages + 1;
/* If not all pages were used from the SGL, free the remaining ones */
- len = sge_no;
while (sge_no < ctxt->rc_recv_wr.num_sge) {
page = ctxt->rc_pages[sge_no++];
put_page(page);
}
- ctxt->rc_page_count = len;
+
+ /* @ctxt's pages have all been released or moved to @rqstp->rq_pages.
+ */
+ ctxt->rc_page_count = 0;
/* Set up tail */
rqstp->rq_arg.tail[0].iov_base = NULL;
@@ -602,11 +602,14 @@ static void rdma_read_complete(struct svc_rqst *rqstp,
{
int page_no;
- /* Copy RPC pages */
+ /* Move Read chunk pages to rqstp so that they will be released
+ * when svc_process is done with them.
+ */
for (page_no = 0; page_no < head->rc_page_count; page_no++) {
put_page(rqstp->rq_pages[page_no]);
rqstp->rq_pages[page_no] = head->rc_pages[page_no];
}
+ head->rc_page_count = 0;
/* Point rq_arg.pages past header */
rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
@@ -777,7 +780,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
if (svc_rdma_is_backchannel_reply(xprt, p)) {
ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
&rqstp->rq_arg);
- svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 0);
+ svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
return ret;
}
@@ -786,7 +789,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
goto out_readchunk;
complete:
- svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 0);
+ svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
rqstp->rq_prot = IPPROTO_MAX;
svc_xprt_copy_addrs(rqstp, xprt);
return rqstp->rq_arg.len;
@@ -799,16 +802,16 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
out_err:
svc_rdma_send_error(rdma_xprt, p, ret);
- svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 0);
+ svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
return 0;
out_postfail:
if (ret == -EINVAL)
svc_rdma_send_error(rdma_xprt, p, ret);
- svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 1);
+ svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
return ret;
out_drop:
- svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 1);
+ svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
return 0;
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index c080ce2..8242aa3 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -282,7 +282,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
ib_wc_status_msg(wc->status),
wc->status, wc->vendor_err);
- svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt, 1);
+ svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
} else {
spin_lock(&rdma->sc_rq_dto_lock);
list_add_tail(&info->ri_readctxt->rc_list,
@@ -834,7 +834,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
* head->rc_arg. Pages involved with RDMA Read I/O are
* transferred there.
*/
- head->rc_hdr_count = head->rc_page_count;
+ head->rc_page_count = head->rc_hdr_count;
head->rc_arg.head[0] = rqstp->rq_arg.head[0];
head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
head->rc_arg.pages = head->rc_pages;