From: Bob Pearson <rpearsonhpe@gmail.com>
To: jgg@nvidia.com, zyjzyj2000@gmail.com, linux-rdma@vger.kernel.org,
jhack@hpe.com
Cc: Bob Pearson <rpearsonhpe@gmail.com>
Subject: [PATCH for-next v3 08/10] RDMA/rxe: Extend send/write_data_in() for frags
Date: Thu, 27 Jul 2023 15:01:27 -0500 [thread overview]
Message-ID: <20230727200128.65947-9-rpearsonhpe@gmail.com> (raw)
In-Reply-To: <20230727200128.65947-1-rpearsonhpe@gmail.com>
Extend send_data_in() and write_data_in() in rxe_resp.c to
support fragmented received skbs.
This is in preparation for using fragmented skbs.
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
drivers/infiniband/sw/rxe/rxe_resp.c | 102 +++++++++++++++++----------
1 file changed, 64 insertions(+), 38 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index dc62e11dc448..c7153e376987 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -559,45 +559,88 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
return state;
}
-static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
- int data_len)
+/**
+ * rxe_send_data_in() - Copy payload data into receive buffer
+ * @qp: The queue pair
+ * @pkt: Request packet info
+ *
+ * Copy the packet payload into the receive buffer at the current offset.
+ * For UD messages, also copy the network (IPv4 or GRH) header into the
+ * receive buffer.
+ *
+ * Returns: RESPST_NONE (0) if successful else an error resp_states value.
+ */
+static enum resp_states rxe_send_data_in(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+ u8 *data_addr = payload_addr(pkt);
+ int data_len = payload_size(pkt);
+ union rdma_network_hdr hdr;
+ enum rxe_mr_copy_op op;
int skb_offset = 0;
int err;
+ /* Per IBA for UD packets copy the IP header into the receive buffer */
+ if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ memset(&hdr.reserved, 0, sizeof(hdr.reserved));
+ memcpy(&hdr.roce4grh, ip_hdr(skb), sizeof(hdr.roce4grh));
+ } else {
+ memcpy(&hdr.ibgrh, ipv6_hdr(skb), sizeof(hdr));
+ }
+ err = rxe_copy_dma_data(skb, qp->pd, IB_ACCESS_LOCAL_WRITE,
+ &qp->resp.wqe->dma, &hdr, skb_offset,
+ sizeof(hdr), RXE_COPY_TO_MR);
+ if (err)
+ goto err_out;
+ }
+
+ op = skb_is_nonlinear(skb) ? RXE_FRAG_TO_MR : RXE_COPY_TO_MR;
+ /* offset to payload from skb->data (= &bth header) */
+ skb_offset = rxe_opcode[pkt->opcode].length;
err = rxe_copy_dma_data(skb, qp->pd, IB_ACCESS_LOCAL_WRITE,
&qp->resp.wqe->dma, data_addr,
- skb_offset, data_len, RXE_COPY_TO_MR);
- if (unlikely(err))
- return (err == -ENOSPC) ? RESPST_ERR_LENGTH
- : RESPST_ERR_MALFORMED_WQE;
+ skb_offset, data_len, op);
+ if (err)
+ goto err_out;
return RESPST_NONE;
+
+err_out:
+ return (err == -ENOSPC) ? RESPST_ERR_LENGTH
+ : RESPST_ERR_MALFORMED_WQE;
}
-static enum resp_states write_data_in(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
+/**
+ * rxe_write_data_in() - Copy payload data to iova
+ * @qp: The queue pair
+ * @pkt: Request packet info
+ *
+ * Copy the packet payload to current iova and update iova.
+ *
+ * Returns: RESPST_NONE (0) if successful else an error resp_states value.
+ */
+static enum resp_states rxe_write_data_in(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
{
struct sk_buff *skb = PKT_TO_SKB(pkt);
- enum resp_states rc = RESPST_NONE;
+ u8 *data_addr = payload_addr(pkt);
int data_len = payload_size(pkt);
+ enum rxe_mr_copy_op op;
+ int skb_offset;
int err;
- int skb_offset = 0;
+ op = skb_is_nonlinear(skb) ? RXE_FRAG_TO_MR : RXE_COPY_TO_MR;
+ skb_offset = rxe_opcode[pkt->opcode].length;
err = rxe_copy_mr_data(skb, qp->resp.mr, qp->resp.va + qp->resp.offset,
- payload_addr(pkt), skb_offset, data_len,
- RXE_COPY_TO_MR);
- if (err) {
- rc = RESPST_ERR_RKEY_VIOLATION;
- goto out;
- }
+ data_addr, skb_offset, data_len, op);
+ if (err)
+ return RESPST_ERR_RKEY_VIOLATION;
qp->resp.va += data_len;
qp->resp.resid -= data_len;
-out:
- return rc;
+ return RESPST_NONE;
}
static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
@@ -991,30 +1034,13 @@ static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
enum resp_states err;
- struct sk_buff *skb = PKT_TO_SKB(pkt);
- union rdma_network_hdr hdr;
if (pkt->mask & RXE_SEND_MASK) {
- if (qp_type(qp) == IB_QPT_UD ||
- qp_type(qp) == IB_QPT_GSI) {
- if (skb->protocol == htons(ETH_P_IP)) {
- memset(&hdr.reserved, 0,
- sizeof(hdr.reserved));
- memcpy(&hdr.roce4grh, ip_hdr(skb),
- sizeof(hdr.roce4grh));
- err = send_data_in(qp, &hdr, sizeof(hdr));
- } else {
- err = send_data_in(qp, ipv6_hdr(skb),
- sizeof(hdr));
- }
- if (err)
- return err;
- }
- err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
+ err = rxe_send_data_in(qp, pkt);
if (err)
return err;
} else if (pkt->mask & RXE_WRITE_MASK) {
- err = write_data_in(qp, pkt);
+ err = rxe_write_data_in(qp, pkt);
if (err)
return err;
} else if (pkt->mask & RXE_READ_MASK) {
--
2.39.2
next prev parent reply other threads:[~2023-07-27 20:02 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-07-27 20:01 [PATCH for-next v3 00/10] RDMA/rxe: Implement support for nonlinear packets Bob Pearson
2023-07-27 20:01 ` [PATCH for-next v3 01/10] RDMA/rxe: Add sg fragment ops Bob Pearson
2023-07-28 1:07 ` Zhu Yanjun
2023-07-28 1:49 ` Bob Pearson
2023-07-27 20:01 ` [PATCH for-next v3 02/10] RDMA/rxe: Extend rxe_mr_copy to support skb frags Bob Pearson
2023-07-27 20:01 ` [PATCH for-next v3 03/10] RDMA/rxe: Extend copy_data " Bob Pearson
2023-07-27 20:01 ` [PATCH for-next v3 04/10] RDMA/rxe: Extend rxe_init_packet() to support frags Bob Pearson
2023-07-27 20:01 ` [PATCH for-next v3 05/10] RDMA/rxe: Extend rxe_icrc.c " Bob Pearson
2023-07-28 14:20 ` Zhu Yanjun
2023-07-28 14:49 ` Bob Pearson
2023-07-28 23:39 ` Zhu Yanjun
2023-07-27 20:01 ` [PATCH for-next v3 06/10] RDMA/rxe: Extend rxe_init_req_packet() for frags Bob Pearson
2023-07-27 20:01 ` [PATCH for-next v3 07/10] RDMA/rxe: Extend response packets " Bob Pearson
2023-07-27 20:01 ` Bob Pearson [this message]
2023-07-27 20:01 ` [PATCH for-next v3 09/10] RDMA/rxe: Extend do_read() in rxe_comp.c " Bob Pearson
2023-07-27 20:01 ` [PATCH for-next v3 10/10] RDMA/rxe: Enable sg code in rxe Bob Pearson
2023-08-15 19:07 ` Jason Gunthorpe
2023-07-28 0:40 ` [PATCH for-next v3 00/10] RDMA/rxe: Implement support for nonlinear packets Zhu Yanjun
2023-07-28 1:54 ` Bob Pearson
2023-08-15 19:08 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230727200128.65947-9-rpearsonhpe@gmail.com \
--to=rpearsonhpe@gmail.com \
--cc=jgg@nvidia.com \
--cc=jhack@hpe.com \
--cc=linux-rdma@vger.kernel.org \
--cc=zyjzyj2000@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox