From: Bob Pearson <rpearsonhpe@gmail.com>
To: jgg@nvidia.com, leon@kernel.org, zyjzyj2000@gmail.com,
jhack@hpe.com, linux-rdma@vger.kernel.org
Cc: Bob Pearson <rpearsonhpe@gmail.com>
Subject: [PATCH for-next 03/17] RDMA/rxe: Isolate code to build request packet
Date: Thu, 27 Oct 2022 13:54:57 -0500 [thread overview]
Message-ID: <20221027185510.33808-4-rpearsonhpe@gmail.com> (raw)
In-Reply-To: <20221027185510.33808-1-rpearsonhpe@gmail.com>
Isolate all the code to build a request packet into a single
subroutine called rxe_init_req_packet(). This merges the former
init_req_packet() and finish_packet() in rxe_req.c, moves the
address-vector/AH lookup and the paylen computation into the new
subroutine, and drops the now-redundant paylen parameter from
rxe_init_packet(), which reads pkt->paylen instead.
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
drivers/infiniband/sw/rxe/rxe_loc.h | 2 +-
drivers/infiniband/sw/rxe/rxe_net.c | 6 +-
drivers/infiniband/sw/rxe/rxe_req.c | 121 ++++++++++++---------------
drivers/infiniband/sw/rxe/rxe_resp.c | 11 +--
4 files changed, 62 insertions(+), 78 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index c2a5c8814a48..574a6afc1199 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -92,7 +92,7 @@ void rxe_mw_cleanup(struct rxe_pool_elem *elem);
/* rxe_net.c */
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
- int paylen, struct rxe_pkt_info *pkt);
+ struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
struct sk_buff *skb);
int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 35f327b9d4b8..1e4456f5cda2 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -443,7 +443,7 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
}
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
- int paylen, struct rxe_pkt_info *pkt)
+ struct rxe_pkt_info *pkt)
{
unsigned int hdr_len;
struct sk_buff *skb = NULL;
@@ -468,7 +468,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
rcu_read_unlock();
goto out;
}
- skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
+ skb = alloc_skb(pkt->paylen + hdr_len + LL_RESERVED_SPACE(ndev),
GFP_ATOMIC);
if (unlikely(!skb)) {
@@ -489,7 +489,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
pkt->rxe = rxe;
pkt->port_num = port_num;
- pkt->hdr = skb_put(skb, paylen);
+ pkt->hdr = skb_put(skb, pkt->paylen);
pkt->mask |= RXE_GRH_MASK;
out:
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 10a75f4e3608..8cc683ebf536 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -456,51 +456,76 @@ static int rxe_init_payload(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
return err;
}
-static struct sk_buff *init_req_packet(struct rxe_qp *qp,
- struct rxe_av *av,
- struct rxe_send_wqe *wqe,
- int opcode, u32 payload,
- struct rxe_pkt_info *pkt)
+static struct sk_buff *rxe_init_req_packet(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe,
+ int opcode, u32 payload,
+ struct rxe_pkt_info *pkt)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct sk_buff *skb;
- int pad = (-payload) & 0x3;
- int paylen;
+ struct rxe_av *av;
+ struct rxe_ah *ah;
+ void *padp;
+ int pad;
+ int err = -EINVAL;
+
+ pkt->rxe = rxe;
+ pkt->opcode = opcode;
+ pkt->qp = qp;
+ pkt->psn = qp->req.psn;
+ pkt->mask = rxe_opcode[opcode].mask;
+ pkt->wqe = wqe;
+ pkt->port_num = 1;
+
+ /* get address vector and address handle for UD qps only */
+ av = rxe_get_av(pkt, &ah);
+ if (unlikely(!av))
+ goto err_out;
/* length from start of bth to end of icrc */
- paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
- pkt->paylen = paylen;
+ pad = (-payload) & 0x3;
+ pkt->paylen = rxe_opcode[opcode].length + payload +
+ pad + RXE_ICRC_SIZE;
/* init skb */
- skb = rxe_init_packet(rxe, av, paylen, pkt);
+ skb = rxe_init_packet(rxe, av, pkt);
if (unlikely(!skb))
- return NULL;
+ goto err_out;
rxe_init_roce_hdrs(qp, wqe, pkt, pad);
- return skb;
-}
+ if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
+ err = rxe_init_payload(qp, wqe, pkt, payload);
+ if (err)
+ goto err_out;
+ }
-static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
- struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, u32 payload)
-{
- int err;
+ if (pad) {
+ padp = payload_addr(pkt) + payload;
+ memset(padp, 0, pad);
+ }
+ /* IP and UDP network headers */
err = rxe_prepare(av, pkt, skb);
if (err)
- return err;
+ goto err_out;
- if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
- err = rxe_init_payload(qp, wqe, pkt, payload);
- if (bth_pad(pkt)) {
- u8 *pad = payload_addr(pkt) + payload;
+ if (ah)
+ rxe_put(ah);
- memset(pad, 0, bth_pad(pkt));
- }
- }
+ return skb;
- return 0;
+err_out:
+ if (err == -EFAULT)
+ wqe->status = IB_WC_LOC_PROT_ERR;
+ else
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ if (skb)
+ kfree_skb(skb);
+ if (ah)
+ rxe_put(ah);
+
+ return NULL;
}
static void update_wqe_state(struct rxe_qp *qp,
@@ -630,7 +655,6 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
int rxe_requester(void *arg)
{
struct rxe_qp *qp = (struct rxe_qp *)arg;
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_pkt_info pkt;
struct sk_buff *skb;
struct rxe_send_wqe *wqe;
@@ -643,8 +667,6 @@ int rxe_requester(void *arg)
struct rxe_send_wqe rollback_wqe;
u32 rollback_psn;
struct rxe_queue *q = qp->sq.queue;
- struct rxe_ah *ah;
- struct rxe_av *av;
if (!rxe_get(qp))
return -EAGAIN;
@@ -753,44 +775,9 @@ int rxe_requester(void *arg)
payload = mtu;
}
- pkt.rxe = rxe;
- pkt.opcode = opcode;
- pkt.qp = qp;
- pkt.psn = qp->req.psn;
- pkt.mask = rxe_opcode[opcode].mask;
- pkt.wqe = wqe;
-
- av = rxe_get_av(&pkt, &ah);
- if (unlikely(!av)) {
- pr_err("qp#%d Failed no address vector\n", qp_num(qp));
- wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto err;
- }
-
- skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
- if (unlikely(!skb)) {
- pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
- wqe->status = IB_WC_LOC_QP_OP_ERR;
- if (ah)
- rxe_put(ah);
- goto err;
- }
-
- err = finish_packet(qp, av, wqe, &pkt, skb, payload);
- if (unlikely(err)) {
- pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
- if (err == -EFAULT)
- wqe->status = IB_WC_LOC_PROT_ERR;
- else
- wqe->status = IB_WC_LOC_QP_OP_ERR;
- kfree_skb(skb);
- if (ah)
- rxe_put(ah);
+ skb = rxe_init_req_packet(qp, wqe, opcode, payload, &pkt);
+ if (unlikely(!skb))
goto err;
- }
-
- if (ah)
- rxe_put(ah);
/*
* To prevent a race on wqe access between requester and completer,
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 95d372db934d..a00885799619 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -665,22 +665,19 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
int pad;
int err;
- /*
- * allocate packet
- */
pad = (-payload) & 0x3;
paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
- skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
- if (!skb)
- return NULL;
-
ack->qp = qp;
ack->opcode = opcode;
ack->mask = rxe_opcode[opcode].mask;
ack->paylen = paylen;
ack->psn = psn;
+ skb = rxe_init_packet(rxe, &qp->pri_av, ack);
+ if (!skb)
+ return NULL;
+
bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
qp->attr.dest_qp_num, 0, psn);
--
2.34.1
next prev parent reply other threads:[~2022-10-27 18:56 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-10-27 18:54 [PATCH for-next 00/17] RDMA/rxe: Enable scatter/gather support for skbs Bob Pearson
2022-10-27 18:54 ` [PATCH for-next 01/17] RDMA/rxe: Isolate code to fill request roce headers Bob Pearson
2022-10-27 18:54 ` [PATCH for-next 02/17] RDMA/rxe: Isolate request payload code in a subroutine Bob Pearson
2022-10-27 18:54 ` Bob Pearson [this message]
2022-10-30 18:52 ` [PATCH for-next 03/17] RDMA/rxe: Isolate code to build request packet kernel test robot
2022-10-27 18:54 ` [PATCH for-next 04/17] RDMA/rxe: Add sg fragment ops Bob Pearson
2022-10-27 18:54 ` [PATCH for-next 05/17] RDMA/rxe: Add rxe_add_frag() to rxe_mr.c Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 06/17] RDMA/rxe: Add routine to compute the number of frags Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 07/17] RDMA/rxe: Extend rxe_mr_copy to support skb frags Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 08/17] RDMA/rxe: Add routine to compute number of frags for dma Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 09/17] RDMA/rxe: Extend copy_data to support skb frags Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 10/17] RDMA/rxe: Replace rxe by qp as a parameter Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 11/17] RDMA/rxe: Extend rxe_init_packet() to support frags Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 12/17] RDMA/rxe: Extend rxe_icrc.c " Bob Pearson
2022-10-27 20:29 ` kernel test robot
2022-10-30 19:33 ` kernel test robot
2022-10-27 18:55 ` [PATCH for-next 13/17] RDMA/rxe: Extend rxe_init_req_packet() for frags Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 14/17] RDMA/rxe: Extend response packets " Bob Pearson
2022-10-30 20:13 ` kernel test robot
2022-10-27 18:55 ` [PATCH for-next 15/17] RDMA/rxe: Extend send/write_data_in() " Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 16/17] RDMA/rxe: Extend do_read() in rxe_comp.c " Bob Pearson
2022-10-27 18:55 ` [PATCH for-next 17/17] RDMA/rxe: Enable sg code in rxe Bob Pearson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221027185510.33808-4-rpearsonhpe@gmail.com \
--to=rpearsonhpe@gmail.com \
--cc=jgg@nvidia.com \
--cc=jhack@hpe.com \
--cc=leon@kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=zyjzyj2000@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox