From: "J. Bruce Fields" <bfields-uC3wQj2KruNg9hUCZPvPmw@public.gmane.org>
To: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
linux-nfs-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
tom-/Yg/VP3ZvrM@public.gmane.org
Subject: Re: [PATCH] svcrdma: send_write() must not overflow the device's max sge
Date: Thu, 10 Jul 2014 16:17:02 -0400 [thread overview]
Message-ID: <20140710201702.GG26561@fieldses.org> (raw)
In-Reply-To: <20140709184914.22217.59770.stgit-T4OLL4TyM9aNDNWfRnPdfg@public.gmane.org>
On Wed, Jul 09, 2014 at 01:49:15PM -0500, Steve Wise wrote:
> Function send_write() must stop creating sges when it reaches the device
> max and return the amount sent in the RDMA Write to the caller.
Queueing up for 3.17, thanks.

--b.
>
> Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
> ---
>
> net/sunrpc/xprtrdma/svc_rdma_sendto.c | 39 +++++++++++++--------------------
> 1 files changed, 15 insertions(+), 24 deletions(-)
>
> diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
> index 49fd21a..9f1b506 100644
> --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
> +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
> @@ -192,6 +192,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
> xdr_sge_no++;
> BUG_ON(xdr_sge_no > vec->count);
> bc -= sge_bytes;
> + if (sge_no == xprt->sc_max_sge)
> + break;
> }
>
> /* Prepare WRITE WR */
> @@ -209,7 +211,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
> atomic_inc(&rdma_stat_write);
> if (svc_rdma_send(xprt, &write_wr))
> goto err;
> - return 0;
> + return write_len - bc;
> err:
> svc_rdma_unmap_dma(ctxt);
> svc_rdma_put_context(ctxt, 0);
> @@ -225,7 +227,6 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
> {
> u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
> int write_len;
> - int max_write;
> u32 xdr_off;
> int chunk_off;
> int chunk_no;
> @@ -239,8 +240,6 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
> res_ary = (struct rpcrdma_write_array *)
> &rdma_resp->rm_body.rm_chunks[1];
>
> - max_write = xprt->sc_max_sge * PAGE_SIZE;
> -
> /* Write chunks start at the pagelist */
> for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
> xfer_len && chunk_no < arg_ary->wc_nchunks;
> @@ -260,23 +259,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
> write_len);
> chunk_off = 0;
> while (write_len) {
> - int this_write;
> - this_write = min(write_len, max_write);
> ret = send_write(xprt, rqstp,
> ntohl(arg_ch->rs_handle),
> rs_offset + chunk_off,
> xdr_off,
> - this_write,
> + write_len,
> vec);
> - if (ret) {
> + if (ret <= 0) {
> dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
> ret);
> return -EIO;
> }
> - chunk_off += this_write;
> - xdr_off += this_write;
> - xfer_len -= this_write;
> - write_len -= this_write;
> + chunk_off += ret;
> + xdr_off += ret;
> + xfer_len -= ret;
> + write_len -= ret;
> }
> }
> /* Update the req with the number of chunks actually used */
> @@ -293,7 +290,6 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
> {
> u32 xfer_len = rqstp->rq_res.len;
> int write_len;
> - int max_write;
> u32 xdr_off;
> int chunk_no;
> int chunk_off;
> @@ -311,8 +307,6 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
> res_ary = (struct rpcrdma_write_array *)
> &rdma_resp->rm_body.rm_chunks[2];
>
> - max_write = xprt->sc_max_sge * PAGE_SIZE;
> -
> /* xdr offset starts at RPC message */
> nchunks = ntohl(arg_ary->wc_nchunks);
> for (xdr_off = 0, chunk_no = 0;
> @@ -330,24 +324,21 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
> write_len);
> chunk_off = 0;
> while (write_len) {
> - int this_write;
> -
> - this_write = min(write_len, max_write);
> ret = send_write(xprt, rqstp,
> ntohl(ch->rs_handle),
> rs_offset + chunk_off,
> xdr_off,
> - this_write,
> + write_len,
> vec);
> - if (ret) {
> + if (ret <= 0) {
> dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
> ret);
> return -EIO;
> }
> - chunk_off += this_write;
> - xdr_off += this_write;
> - xfer_len -= this_write;
> - write_len -= this_write;
> + chunk_off += ret;
> + xdr_off += ret;
> + xfer_len -= ret;
> + write_len -= ret;
> }
> }
> /* Update the req with the number of chunks actually used */
>
--
To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
next prev parent reply other threads:[~2014-07-10 20:17 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-07-09 18:49 [PATCH] svcrdma: send_write() must not overflow the device's max sge Steve Wise
[not found] ` <20140709184914.22217.59770.stgit-T4OLL4TyM9aNDNWfRnPdfg@public.gmane.org>
2014-07-10 19:51 ` Steve Wise
2014-07-10 20:17 ` J. Bruce Fields [this message]
2014-07-16 6:16 ` Devesh Sharma
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20140710201702.GG26561@fieldses.org \
--to=bfields-uc3wqj2krung9huczpvpmw@public.gmane.org \
--cc=linux-nfs-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
--cc=linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
--cc=swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org \
--cc=tom-/Yg/VP3ZvrM@public.gmane.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox