From: Bob Pearson <rpearsonhpe@gmail.com>
To: jgg@nvidia.com, zyjzyj2000@gmail.com, linux-rdma@vger.kernel.org
Cc: Bob Pearson <rpearson@hpe.com>
Subject: [PATCH for-next v5 08/12] rdma_rxe: Add support for extended CQ operations
Date: Fri, 18 Sep 2020 16:15:13 -0500
Message-ID: <20200918211517.5295-9-rpearson@hpe.com>
In-Reply-To: <20200918211517.5295-1-rpearson@hpe.com>
Add private members to the user/kernel wc struct (rxe_uverbs_wc) to
carry the extensions used by extended CQs (cq_ex).
Add timestamps on completion.
Add ignore overrun support.
Add the extended create and modify CQ commands to the user API
command bitmask.
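For reference, a rough userspace sketch (not part of this patch) of how
a consumer would exercise these extensions through the standard
libibverbs extended CQ API; it assumes an already opened device context
(ctx here) and the matching rdma-core provider support for rxe:

    /* Sketch only: ctx is an assumed, previously opened ibv_context.
     * Request completion timestamps and ask the CQ to ignore overruns.
     */
    struct ibv_cq_init_attr_ex attr_ex = {
            .cqe         = 256,
            .comp_vector = 0,
            .wc_flags    = IBV_WC_STANDARD_FLAGS |
                           IBV_WC_EX_WITH_COMPLETION_TIMESTAMP,
            .comp_mask   = IBV_CQ_INIT_ATTR_MASK_FLAGS,
            .flags       = IBV_CREATE_CQ_ATTR_IGNORE_OVERRUN,
    };
    struct ibv_cq_ex *cq = ibv_create_cq_ex(ctx, &attr_ex);

    struct ibv_poll_cq_attr poll_attr = { 0 };

    /* Extended poll loop: read per-completion fields, including the
     * completion timestamp, directly from the CQ.
     */
    if (cq && ibv_start_poll(cq, &poll_attr) == 0) {
            do {
                    uint64_t wr_id = cq->wr_id;
                    uint64_t ts    = ibv_wc_read_completion_ts(cq);
                    /* handle the completion ... */
            } while (ibv_next_poll(cq) == 0);
            ibv_end_poll(cq);
    }

The timestamp/realtime members added to struct rxe_uverbs_wc below are
what the provider would hand back for ibv_wc_read_completion_ts() and,
for the realtime value, ibv_wc_read_completion_wallclock_ns() when
IBV_WC_EX_WITH_COMPLETION_TIMESTAMP_WALLCLOCK is requested.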
Signed-off-by: Bob Pearson <rpearson@hpe.com>
---
drivers/infiniband/sw/rxe/rxe_comp.c | 7 ++++-
drivers/infiniband/sw/rxe/rxe_resp.c | 20 +++++++++-----
drivers/infiniband/sw/rxe/rxe_verbs.c | 10 ++++---
drivers/infiniband/sw/rxe/rxe_verbs.h | 3 ++-
include/uapi/rdma/rdma_user_rxe.h | 38 ++++++++++++++++++++++-----
5 files changed, 58 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 8b81d3b24a8a..72745ffcf118 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -390,7 +390,7 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
wc->byte_len = wqe->dma.length;
wc->qp = &qp->ibqp;
} else {
- struct ib_uverbs_wc *uwc = &cqe->uibwc;
+ struct rxe_uverbs_wc *uwc = &cqe->ruwc;
uwc->wr_id = wqe->wr.wr_id;
uwc->status = wqe->status;
@@ -400,6 +400,11 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
uwc->wc_flags = IB_WC_WITH_IMM;
uwc->byte_len = wqe->dma.length;
uwc->qp_num = qp->ibqp.qp_num;
+ if (qp->scq->flags &
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
+ uwc->timestamp = (u64)ktime_get();
+ uwc->realtime = (u64)ktime_get_real();
+ }
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 136c7699fed3..660f33318ec9 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -844,7 +844,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
{
struct rxe_cqe cqe;
struct ib_wc *wc = &cqe.ibwc;
- struct ib_uverbs_wc *uwc = &cqe.uibwc;
+ struct rxe_uverbs_wc *uwc = &cqe.ruwc;
struct rxe_recv_wqe *wqe = qp->resp.wqe;
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
@@ -854,13 +854,13 @@ static enum resp_states do_complete(struct rxe_qp *qp,
memset(&cqe, 0, sizeof(cqe));
if (qp->rcq->is_user) {
- uwc->status = qp->resp.status;
- uwc->qp_num = qp->ibqp.qp_num;
- uwc->wr_id = wqe->wr_id;
+ uwc->status = qp->resp.status;
+ uwc->qp_num = qp->ibqp.qp_num;
+ uwc->wr_id = wqe->wr_id;
} else {
- wc->status = qp->resp.status;
- wc->qp = &qp->ibqp;
- wc->wr_id = wqe->wr_id;
+ wc->status = qp->resp.status;
+ wc->qp = &qp->ibqp;
+ wc->wr_id = wqe->wr_id;
}
if (wc->status == IB_WC_SUCCESS) {
@@ -895,6 +895,12 @@ static enum resp_states do_complete(struct rxe_qp *qp,
uwc->src_qp = deth_sqp(pkt);
uwc->port_num = qp->attr.port_num;
+
+ if (qp->rcq->flags &
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
+ uwc->timestamp = (u64)ktime_get();
+ uwc->realtime = (u64)ktime_get_real();
+ }
} else {
struct sk_buff *skb = PKT_TO_SKB(pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index a77f2e0ef68f..594d8353600a 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -749,7 +749,8 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
return err;
}
-static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+static int rxe_create_cq(struct ib_cq *ibcq,
+ const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
int err;
@@ -764,13 +765,12 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
uresp = udata->outbuf;
}
- if (attr->flags)
- return -EINVAL;
-
err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
if (err)
return err;
+ cq->flags = attr->flags;
+
err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
uresp);
if (err)
@@ -1187,6 +1187,8 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
dev->uverbs_ex_cmd_mask =
BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE)
+ | BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ)
+ | BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_CQ)
;
ib_set_device_ops(dev, &rxe_dev_ops);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index b24a9a0878c2..784ae4102265 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -53,7 +53,7 @@ struct rxe_ah {
struct rxe_cqe {
union {
struct ib_wc ibwc;
- struct ib_uverbs_wc uibwc;
+ struct rxe_uverbs_wc ruwc;
};
};
@@ -62,6 +62,7 @@ struct rxe_cq {
struct rxe_pool_entry pelem;
struct rxe_queue *queue;
spinlock_t cq_lock;
+ u32 flags;
u8 notify;
bool is_dying;
int is_user;
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
index d49125682359..95352e050ab4 100644
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -98,29 +98,27 @@ struct rxe_send_wr {
__aligned_u64 length;
union {
__u32 mr_index;
- __aligned_u64 reserved1;
+ __aligned_u64 pad1;
};
union {
__u32 mw_index;
- __aligned_u64 reserved2;
+ __aligned_u64 pad2;
};
__u32 rkey;
__u32 access;
__u32 flags;
} umw;
- /* The following are only used by the kernel
- * and are not part of the uapi
- */
+ /* below are only used by the kernel */
struct {
__aligned_u64 addr;
__aligned_u64 length;
union {
struct ib_mr *mr;
- __aligned_u64 reserved1;
+ __aligned_u64 reserved1;
};
union {
struct ib_mw *mw;
- __aligned_u64 reserved2;
+ __aligned_u64 reserved2;
};
__u32 rkey;
__u32 access;
@@ -184,6 +182,32 @@ struct rxe_recv_wqe {
struct rxe_dma_info dma;
};
+struct rxe_uverbs_wc {
+ /* keep these the same as ib_uverbs_wc */
+ __aligned_u64 wr_id;
+ __u32 status;
+ __u32 opcode;
+ __u32 vendor_err;
+ __u32 byte_len;
+ union {
+ __be32 imm_data;
+ __u32 invalidate_rkey;
+ } ex;
+ __u32 qp_num;
+ __u32 src_qp;
+ __u32 wc_flags;
+ __u16 pkey_index;
+ __u16 slid;
+ __u8 sl;
+ __u8 dlid_path_bits;
+ __u8 port_num;
+ __u8 reserved;
+
+ /* any extras go here */
+ __aligned_u64 timestamp;
+ __aligned_u64 realtime;
+};
+
struct rxe_create_cq_resp {
struct mminfo mi;
};
--
2.25.1