From: Selvin Xavier <selvin.xavier@broadcom.com>
To: dledford@redhat.com, linux-rdma@vger.kernel.org
Cc: netdev@vger.kernel.org,
Selvin Xavier <selvin.xavier@broadcom.com>,
Eddie Wai <eddie.wai@broadcom.com>,
Devesh Sharma <devesh.sharma@broadcom.com>,
Somnath Kotur <somnath.kotur@broadcom.com>,
Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Subject: [PATCH V2 15/22] bnxt_re: Support post_recv
Date: Thu, 8 Dec 2016 22:48:09 -0800
Message-ID: <1481266096-23331-16-git-send-email-selvin.xavier@broadcom.com>
In-Reply-To: <1481266096-23331-1-git-send-email-selvin.xavier@broadcom.com>

Enable the fast-path verb ib_post_recv. Add the qplib routines that
build receive WQEs and ring the RQ doorbell, handle the QP1 (GSI)
shadow-QP receive path, and hook post_recv into the ib_device
structure.
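
For reference, a kernel consumer posts a receive buffer through the
generic verb roughly as in the sketch below. This is illustrative
only and not part of this patch; the qp, DMA address, length, lkey
and wr_id are placeholders assumed to have been set up by the
consumer beforehand:

  #include <rdma/ib_verbs.h>

  /* Illustrative only: post a single DMA-mapped buffer to the RQ.
   * ib_post_recv() dispatches to the provider's ->post_recv hook,
   * i.e. bnxt_re_post_recv() for a bnxt_re QP.
   */
  static int example_post_one_recv(struct ib_qp *qp, u64 dma_addr,
                                   u32 len, u32 lkey, u64 wr_id)
  {
          struct ib_sge sge = {
                  .addr   = dma_addr,  /* DMA address of the receive buffer */
                  .length = len,
                  .lkey   = lkey,      /* e.g. pd->local_dma_lkey */
          };
          struct ib_recv_wr wr = {
                  .wr_id   = wr_id,    /* echoed back in the work completion */
                  .sg_list = &sge,
                  .num_sge = 1,
          };
          struct ib_recv_wr *bad_wr;

          return ib_post_recv(qp, &wr, &bad_wr);
  }
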
Signed-off-by: Eddie Wai <eddie.wai@broadcom.com>
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
---
drivers/infiniband/hw/bnxtre/bnxt_qplib_fp.c | 100 ++++++++++++++++++
drivers/infiniband/hw/bnxtre/bnxt_qplib_fp.h | 8 ++
drivers/infiniband/hw/bnxtre/bnxt_re_ib_verbs.c | 133 ++++++++++++++++++++++++
drivers/infiniband/hw/bnxtre/bnxt_re_ib_verbs.h | 2 +
drivers/infiniband/hw/bnxtre/bnxt_re_main.c | 2 +
5 files changed, 245 insertions(+)
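
A note on the RQ-full check added in bnxt_qplib_post_recv(): it uses
the usual one-slot-reserved ring convention, where the queue is full
once the wrapped (prod + 1) index equals the wrapped cons index. The
standalone sketch below shows that convention in isolation; it assumes
a power-of-two ring size with mask-based wrapping (which is what the
HWQ_CMP() usage here suggests) and is purely illustrative:

  #include <stdbool.h>
  #include <stdint.h>

  struct ring {
          uint32_t prod;          /* next slot software will fill */
          uint32_t cons;          /* next slot completions will drain */
          uint32_t max_elements;  /* assumed to be a power of two */
  };

  /* Wrap an ever-increasing index into the ring. */
  static inline uint32_t ring_cmp(uint32_t idx, const struct ring *r)
  {
          return idx & (r->max_elements - 1);
  }

  /* Full when advancing prod would land on cons; one slot is kept
   * unused so that "full" and "empty" stay distinguishable.
   */
  static inline bool ring_full(const struct ring *r)
  {
          return ring_cmp(r->prod + 1, r) == ring_cmp(r->cons, r);
  }
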
diff --git a/drivers/infiniband/hw/bnxtre/bnxt_qplib_fp.c b/drivers/infiniband/hw/bnxtre/bnxt_qplib_fp.c
index 419efe2..67188ce 100644
--- a/drivers/infiniband/hw/bnxtre/bnxt_qplib_fp.c
+++ b/drivers/infiniband/hw/bnxtre/bnxt_qplib_fp.c
@@ -1107,6 +1107,37 @@ void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
return NULL;
}
+u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_q *rq = &qp->rq;
+
+ return HWQ_CMP(rq->hwq.prod, &rq->hwq);
+}
+
+dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
+{
+ return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
+}
+
+void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_sge *sge)
+{
+ struct bnxt_qplib_q *rq = &qp->rq;
+ u32 sw_prod;
+
+ memset(sge, 0, sizeof(*sge));
+
+ if (qp->rq_hdr_buf) {
+ sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
+ sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
+ sw_prod * qp->rq_hdr_buf_size);
+ sge->lkey = 0xFFFFFFFF;
+ sge->size = qp->rq_hdr_buf_size;
+ return qp->rq_hdr_buf + sw_prod * sge->size;
+ }
+ return NULL;
+}
+
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *sq = &qp->sq;
@@ -1355,6 +1386,75 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
return rc;
}
+void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_q *rq = &qp->rq;
+ struct dbr_dbr db_msg = { 0 };
+ u32 sw_prod;
+
+ sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
+ db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
+ DBR_DBR_INDEX_MASK);
+ db_msg.type_xid =
+ cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
+ DBR_DBR_TYPE_RQ);
+
+ /* Flush the writes to the HW Rx WQE before ringing the Rx DB */
+ wmb();
+ __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
+}
+
+int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_qplib_q *rq = &qp->rq;
+ struct rq_wqe *rqe, **rqe_ptr;
+ struct sq_sge *hw_sge;
+ u32 sw_prod;
+ int i, rc = 0;
+
+ if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ dev_err(&rq->hwq.pdev->dev,
+ "QPLIB: FP: QP (0x%x) is in the 0x%x state",
+ qp->id, qp->state);
+ rc = -EINVAL;
+ goto done;
+ }
+ if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) ==
+ HWQ_CMP(rq->hwq.cons, &rq->hwq)) {
+ dev_err(&rq->hwq.pdev->dev,
+ "QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
+ rc = -EINVAL;
+ goto done;
+ }
+ sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
+ rq->swq[sw_prod].wr_id = wqe->wr_id;
+
+ rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
+ rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
+
+ memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+
+ /* Fill in the receive SGEs */
+ for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
+ i < wqe->num_sge; i++, hw_sge++) {
+ hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
+ hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
+ hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
+ }
+ rqe->wqe_type = wqe->type;
+ rqe->flags = wqe->flags;
+ rqe->wqe_size = wqe->num_sge +
+ ((offsetof(typeof(*rqe), data) + 15) >> 4);
+
+ /* Supply the rqe->wr_id index to the wr_id_tbl for now */
+ rqe->wr_id[0] = cpu_to_le32(sw_prod);
+
+ rq->hwq.prod++;
+done:
+ return rc;
+}
+
/* CQ */
/* Spinlock must be held */
diff --git a/drivers/infiniband/hw/bnxtre/bnxt_qplib_fp.h b/drivers/infiniband/hw/bnxtre/bnxt_qplib_fp.h
index 7fe98db..d9f2611 100644
--- a/drivers/infiniband/hw/bnxtre/bnxt_qplib_fp.h
+++ b/drivers/infiniband/hw/bnxtre/bnxt_qplib_fp.h
@@ -395,9 +395,17 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_sge *sge);
+void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_sge *sge);
+u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
+dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
+ u32 index);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe);
+void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
+int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
diff --git a/drivers/infiniband/hw/bnxtre/bnxt_re_ib_verbs.c b/drivers/infiniband/hw/bnxtre/bnxt_re_ib_verbs.c
index 540f2f2..5d6c89c 100644
--- a/drivers/infiniband/hw/bnxtre/bnxt_re_ib_verbs.c
+++ b/drivers/infiniband/hw/bnxtre/bnxt_re_ib_verbs.c
@@ -1644,6 +1644,61 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
return rc;
}
+/* The MAD layer provides only a recv SGE sized for the ib_grh plus
+ * the MAD datagram; no Ethernet headers, Ethertype, BTH, DETH,
+ * or RoCE iCRC are included. The Cu+ solution must provide a buffer
+ * for the entire receive packet (334 bytes) with no VLAN, and then
+ * copy the GRH and the MAD datagram out to the provided SGE.
+ */
+static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
+ struct ib_recv_wr *wr,
+ struct bnxt_qplib_swqe *wqe,
+ int payload_size)
+{
+ struct bnxt_qplib_sge ref, sge;
+ int rc = 0;
+ u32 rq_prod_index;
+ struct bnxt_re_sqp_entries *sqp_entry;
+
+ rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
+
+ if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) {
+ /* Create 1 SGE to receive the entire
+ * Ethernet packet
+ */
+ /* Save the reference from ULP */
+ ref.addr = wqe->sg_list[0].addr;
+ ref.lkey = wqe->sg_list[0].lkey;
+ ref.size = wqe->sg_list[0].size;
+
+ sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
+
+ /* SGE 1 */
+ wqe->sg_list[0].addr = sge.addr;
+ wqe->sg_list[0].lkey = sge.lkey;
+ wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
+ /* Ensure the header carve-out fits in the buffer before consuming it */
+ if (sge.size < wqe->sg_list[0].size) {
+ dev_err(rdev_to_dev(qp->rdev),
+ "QP1 RQ buffer is too small!");
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ sqp_entry->sge.addr = ref.addr;
+ sqp_entry->sge.lkey = ref.lkey;
+ sqp_entry->sge.size = ref.size;
+ /* Store the wrid for reporting completion */
+ sqp_entry->wrid = wqe->wr_id;
+ /* Change wqe->wr_id to the table index */
+ wqe->wr_id = rq_prod_index;
+ }
+ return 0;
+done:
+
+ return rc;
+}
+
static int is_ud_qp(struct bnxt_re_qp *qp)
{
return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
@@ -1988,6 +2043,84 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
return rc;
}
+int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
+ struct bnxt_re_qp *qp,
+ struct ib_recv_wr *wr)
+{
+ struct bnxt_qplib_swqe wqe;
+ int rc = 0, payload_sz = 0;
+
+ memset(&wqe, 0, sizeof(wqe));
+ while (wr) {
+ /* Housekeeping */
+ memset(&wqe, 0, sizeof(wqe));
+
+ /* Common */
+ wqe.num_sge = wr->num_sge;
+ if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
+ dev_err(rdev_to_dev(rdev),
+ "Limit exceeded for Receive SGEs");
+ rc = -EINVAL;
+ goto bad;
+ }
+ payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
+ wr->num_sge);
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+
+ if (!rc)
+ rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
+bad:
+ if (rc)
+ break;
+
+ wr = wr->next;
+ }
+ bnxt_qplib_post_recv_db(&qp->qplib_qp);
+ return rc;
+}
+
+int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_qplib_swqe wqe;
+ int rc = 0, payload_sz = 0;
+
+ while (wr) {
+ /* Housekeeping */
+ memset(&wqe, 0, sizeof(wqe));
+
+ /* Common */
+ wqe.num_sge = wr->num_sge;
+ if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
+ dev_err(rdev_to_dev(qp->rdev),
+ "Limit exceeded for Receive SGEs");
+ rc = -EINVAL;
+ goto bad;
+ }
+
+ payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
+ wr->num_sge);
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+
+ if (ib_qp->qp_type == IB_QPT_GSI)
+ rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
+ payload_sz);
+ if (!rc)
+ rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
+bad:
+ if (rc) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ bnxt_qplib_post_recv_db(&qp->qplib_qp);
+ return rc;
+}
+
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
{
diff --git a/drivers/infiniband/hw/bnxtre/bnxt_re_ib_verbs.h b/drivers/infiniband/hw/bnxtre/bnxt_re_ib_verbs.h
index becdcdc..9f3dd49 100644
--- a/drivers/infiniband/hw/bnxtre/bnxt_re_ib_verbs.h
+++ b/drivers/infiniband/hw/bnxtre/bnxt_re_ib_verbs.h
@@ -164,6 +164,8 @@ int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int bnxt_re_destroy_qp(struct ib_qp *qp);
int bnxt_re_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
struct ib_send_wr **bad_send_wr);
+int bnxt_re_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr);
struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
diff --git a/drivers/infiniband/hw/bnxtre/bnxt_re_main.c b/drivers/infiniband/hw/bnxtre/bnxt_re_main.c
index 14d1147..73dfadd 100644
--- a/drivers/infiniband/hw/bnxtre/bnxt_re_main.c
+++ b/drivers/infiniband/hw/bnxtre/bnxt_re_main.c
@@ -452,6 +452,8 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->destroy_qp = bnxt_re_destroy_qp;
ibdev->post_send = bnxt_re_post_send;
+ ibdev->post_recv = bnxt_re_post_recv;
+
ibdev->create_cq = bnxt_re_create_cq;
ibdev->destroy_cq = bnxt_re_destroy_cq;
ibdev->req_notify_cq = bnxt_re_req_notify_cq;
--
2.5.5