From: Ye Bin <yebin@huaweicloud.com>
To: ericvh@gmail.com, lucho@ionkov.net, asmadeus@codewreck.org,
linux_oss@crudebyte.com, davem@davemloft.net,
edumazet@google.com, kuba@kernel.org, pabeni@redhat.com,
v9fs-developer@lists.sourceforge.net, netdev@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, yebin10@huawei.com
Subject: [PATCH 5/5] 9p: refactor 'post_recv()'
Date: Mon, 21 Nov 2022 16:00:49 +0800 [thread overview]
Message-ID: <20221121080049.3850133-6-yebin@huaweicloud.com> (raw)
In-Reply-To: <20221121080049.3850133-1-yebin@huaweicloud.com>
From: Ye Bin <yebin10@huawei.com>
Refactor 'post_recv()': move the receive-resource setup (reply context
allocation and the rq_sem accounting) from 'rdma_request()' into 'post_recv()'.
Signed-off-by: Ye Bin <yebin10@huawei.com>
---
net/9p/trans_rdma.c | 77 +++++++++++++++++++++++----------------------
1 file changed, 39 insertions(+), 38 deletions(-)
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index bb917389adc9..78452c289f35 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -380,19 +380,40 @@ static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
kfree(rdma);
}
-static int
-post_recv(struct p9_client *client, struct p9_rdma_context *c)
+static int post_recv(struct p9_client *client, struct p9_req_t *req)
{
struct p9_trans_rdma *rdma = client->trans;
+ struct p9_rdma_context *c = NULL;
struct ib_recv_wr wr;
struct ib_sge sge;
- int err = -EIO;
+ int err;
+
+ c = kmalloc(sizeof *c, GFP_NOFS);
+ if (!c) {
+ err = -ENOMEM;
+ goto error;
+ }
+ c->rc.sdata = req->rc.sdata;
+
+ /*
+ * Post a receive buffer for this request. We need to ensure
+ * there is a reply buffer available for every outstanding
+ * request. A flushed request can result in no reply for an
+ * outstanding request, so we must keep a count to avoid
+ * overflowing the RQ.
+ */
+ if (down_interruptible(&rdma->rq_sem)) {
+ err = -EINTR;
+ goto error;
+ }
c->busa = ib_dma_map_single(rdma->cm_id->device,
c->rc.sdata, client->msize,
DMA_FROM_DEVICE);
- if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
- goto error;
+ if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
+ err = -EIO;
+ goto sem_error;
+ }
c->cqe.done = recv_done;
@@ -405,15 +426,18 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
wr.sg_list = &sge;
wr.num_sge = 1;
err = ib_post_recv(rdma->qp, &wr, NULL);
- if (err) {
- ib_dma_unmap_single(rdma->cm_id->device, c->busa,
- client->msize, DMA_FROM_DEVICE);
- goto error;
- }
+ if (err)
+ goto mapping_error;
+
return 0;
- error:
+
+mapping_error:
+ ib_dma_unmap_single(rdma->cm_id->device, c->busa,
+ client->msize, DMA_FROM_DEVICE);
+sem_error:
up(&rdma->rq_sem);
- p9_debug(P9_DEBUG_ERROR, "EIO\n");
+error:
+ kfree(c);
return err;
}
@@ -481,9 +505,8 @@ static int post_send(struct p9_client *client, struct p9_req_t *req)
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
struct p9_trans_rdma *rdma = client->trans;
- int err = 0;
unsigned long flags;
- struct p9_rdma_context *rpl_context = NULL;
+ int err;
/* When an error occurs between posting the recv and the send,
* there will be a receive context posted without a pending request.
@@ -505,27 +528,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
}
}
- /* Allocate an fcall for the reply */
- rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
- if (!rpl_context) {
- err = -ENOMEM;
- goto recv_error;
- }
- rpl_context->rc.sdata = req->rc.sdata;
-
- /*
- * Post a receive buffer for this request. We need to ensure
- * there is a reply buffer available for every outstanding
- * request. A flushed request can result in no reply for an
- * outstanding request, so we must keep a count to avoid
- * overflowing the RQ.
- */
- if (down_interruptible(&rdma->rq_sem)) {
- err = -EINTR;
- goto recv_error;
- }
-
- err = post_recv(client, rpl_context);
+ err = post_recv(client, req);
if (err) {
p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err);
goto recv_error;
@@ -547,9 +550,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
}
return err;
- /* Handle errors that happened during or while preparing post_recv(): */
- recv_error:
- kfree(rpl_context);
+recv_error:
spin_lock_irqsave(&rdma->req_lock, flags);
if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
rdma->state = P9_RDMA_CLOSING;
--
2.31.1
next prev parent reply other threads:[~2022-11-21 7:39 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-11-21 8:00 [PATCH 0/5] Fix error handle in 'rdma_request()' Ye Bin
2022-11-21 8:00 ` [PATCH 1/5] 9p: fix miss unmap request " Ye Bin
2022-11-21 8:00 ` [PATCH 2/5] 9p: fix miss release semaphore " Ye Bin
2022-11-21 8:00 ` [PATCH 3/5] 9p: fix error handle in 'post_recv()' Ye Bin
2022-11-21 8:00 ` [PATCH 4/5] 9p: factor out 'post_send()' Ye Bin
2022-11-21 8:00 ` Ye Bin [this message]
2022-11-22 0:27 ` [PATCH 0/5] Fix error handle in 'rdma_request()' asmadeus
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221121080049.3850133-6-yebin@huaweicloud.com \
--to=yebin@huaweicloud.com \
--cc=asmadeus@codewreck.org \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=ericvh@gmail.com \
--cc=kuba@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux_oss@crudebyte.com \
--cc=lucho@ionkov.net \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=v9fs-developer@lists.sourceforge.net \
--cc=yebin10@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).