* [PATCH for-next 0/6] Add rq inline and bugfixes for hns
@ 2017-12-23 8:22 Lijun Ou
[not found] ` <1514017342-91468-1-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
0 siblings, 1 reply; 15+ messages in thread
From: Lijun Ou @ 2017-12-23 8:22 UTC (permalink / raw)
To: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc
Cc: leon-DgEjT+Ai2ygdnm+yROfE0A, linux-rdma-u79uwXL29TY76Z2rM5mHXA
This patch series adds rq inline support for hip08 kernel
mode as well as fix some bugs for hip06 and hip08.
Lijun Ou (5):
RDMA/hns: Add rq inline data support for hip08 RoCE
RDMA/hns: Update the usage of sr_max and rr_max field
RDMA/hns: Set access flags of hip08 RoCE
RDMA/hns: Filter for zero length of sge in hip08 kernel mode
RDMA/hns: Assign dest_qp when deregistering mr
Yixian Liu (1):
RDMA/hns: Fix QP state judgement before sending work requests
drivers/infiniband/hw/hns/hns_roce_device.h | 19 +++
drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 1 +
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 176 ++++++++++++++++++++++------
drivers/infiniband/hw/hns/hns_roce_qp.c | 52 ++++++--
4 files changed, 207 insertions(+), 41 deletions(-)
--
1.9.1
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 15+ messages in thread
* [PATCH for-next 1/6] RDMA/hns: Add rq inline data support for hip08 RoCE
[not found] ` <1514017342-91468-1-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
@ 2017-12-23 8:22 ` Lijun Ou
[not found] ` <1514017342-91468-2-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-12-23 8:22 ` [PATCH for-next 2/6] RDMA/hns: Update the usage of sr_max and rr_max field Lijun Ou
` (4 subsequent siblings)
5 siblings, 1 reply; 15+ messages in thread
From: Lijun Ou @ 2017-12-23 8:22 UTC (permalink / raw)
To: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc
Cc: leon-DgEjT+Ai2ygdnm+yROfE0A, linux-rdma-u79uwXL29TY76Z2rM5mHXA
This patch mainly implements the rq inline data feature for hip08
RoCE in kernel mode.
Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
---
drivers/infiniband/hw/hns/hns_roce_device.h | 18 +++++++++
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 63 ++++++++++++++++++++++++++++-
drivers/infiniband/hw/hns/hns_roce_qp.c | 52 ++++++++++++++++++++----
3 files changed, 125 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index dcfd209..8d123d3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -178,6 +178,7 @@ enum {
enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
+ HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2)
};
enum hns_roce_mtt_type {
@@ -446,6 +447,21 @@ struct hns_roce_cmd_mailbox {
struct hns_roce_dev;
+struct hns_roce_rinl_sge {
+ void *addr;
+ u32 len;
+};
+
+struct hns_roce_rinl_wqe {
+ struct hns_roce_rinl_sge *sg_list;
+ u32 sge_cnt;
+};
+
+struct hns_roce_rinl_buf {
+ struct hns_roce_rinl_wqe *wqe_list;
+ u32 wqe_cnt;
+};
+
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_buf hr_buf;
@@ -477,6 +493,8 @@ struct hns_roce_qp {
struct hns_roce_sge sge;
u32 next_sge;
+
+ struct hns_roce_rinl_buf rq_inl_buf;
};
struct hns_roce_sqp {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 4d3e976..b17dcfa 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -299,6 +299,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
+ struct hns_roce_rinl_sge *sge_list;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db rq_db;
unsigned long flags;
@@ -347,6 +348,14 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
dseg[i].addr = 0;
}
+ /* rq support inline data */
+ sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
+ hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
+ sge_list[i].len = wr->sg_list[i].length;
+ }
+
hr_qp->rq.wrid[ind] = wr->wr_id;
ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
@@ -961,7 +970,8 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
- HNS_ROCE_CAP_FLAG_ROCE_V1_V2;
+ HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
+ HNS_ROCE_CAP_FLAG_RQ_INLINE;
caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
@@ -1476,6 +1486,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
+ struct hns_roce_rinl_sge *sge_list;
struct hns_roce_dev *hr_dev;
struct hns_roce_v2_cqe *cqe;
struct hns_roce_qp *hr_qp;
@@ -1673,6 +1684,46 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
break;
}
+ if ((wc->qp->qp_type == IB_QPT_RC ||
+ wc->qp->qp_type == IB_QPT_UC) &&
+ (opcode == HNS_ROCE_V2_OPCODE_SEND ||
+ opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
+ opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
+ (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
+ u32 wr_num, wr_cnt, sge_num, sge_cnt;
+ u32 data_len, size;
+ u8 *wqe_buf;
+
+ wr_num = (u16)roce_get_field(cqe->byte_4,
+ V2_CQE_BYTE_4_WQE_INDX_M,
+ V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
+ wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
+
+ sge_list =
+ (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
+ sge_num =
+ (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
+ wqe_buf = (u8 *)get_recv_wqe(*cur_qp, wr_cnt);
+ data_len = wc->byte_len;
+
+ for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len);
+ sge_cnt++) {
+ size = sge_list[sge_cnt].len < data_len ?
+ sge_list[sge_cnt].len : data_len;
+
+ memcpy((void *)sge_list[sge_cnt].addr,
+ (void *)wqe_buf, size);
+
+ data_len -= size;
+ wqe_buf += size;
+ }
+
+ if (data_len) {
+ wc->status = IB_WC_LOC_LEN_ERR;
+ return -EAGAIN;
+ }
+ }
+
/* Update tail pointer, record wr_id */
wq = &(*cur_qp)->rq;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
@@ -1972,6 +2023,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
!!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
@@ -3114,6 +3166,15 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ if (hr_qp->rq_inl_buf.wqe_list) {
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+ kfree(hr_qp->rq_inl_buf.wqe_list);
+ hr_qp->rq_inl_buf.wqe_list[0].sg_list = NULL;
+ hr_qp->rq_inl_buf.wqe_list = NULL;
+ }
+ }
+
return 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 69e2584..7c75f21b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -494,6 +494,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
int ret = 0;
u32 page_shift;
u32 npages;
+ int i;
mutex_init(&hr_qp->mutex);
spin_lock_init(&hr_qp->sq.lock);
@@ -513,18 +514,42 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_out;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ /* allocate recv inline buf */
+ hr_qp->rq_inl_buf.wqe_list = kmalloc_array(hr_qp->rq.wqe_cnt,
+ sizeof(struct hns_roce_rinl_wqe),
+ GFP_KERNEL);
+ if (!hr_qp->rq_inl_buf.wqe_list)
+ goto err_out;
+
+ hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;
+
+ hr_qp->rq_inl_buf.wqe_list[0].sg_list =
+ kmalloc_array(hr_qp->rq_inl_buf.wqe_cnt *
+ init_attr->cap.max_recv_sge,
+ sizeof(struct hns_roce_rinl_sge),
+ GFP_KERNEL);
+ if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list)
+ goto err_wqe_list;
+
+ for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
+ hr_qp->rq_inl_buf.wqe_list[i].sg_list =
+ &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
+ init_attr->cap.max_recv_sge];
+ }
+
if (ib_pd->uobject) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "ib_copy_from_udata error for create qp\n");
ret = -EFAULT;
- goto err_out;
+ goto err_rq_sge_list;
}
ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
&ucmd);
if (ret) {
dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
@@ -533,7 +558,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
@@ -567,13 +592,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
dev_err(dev, "init_attr->create_flags error!\n");
ret = -EINVAL;
- goto err_out;
+ goto err_rq_sge_list;
}
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
dev_err(dev, "init_attr->create_flags error!\n");
ret = -EINVAL;
- goto err_out;
+ goto err_rq_sge_list;
}
/* Set SQ size */
@@ -581,7 +606,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp);
if (ret) {
dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
- goto err_out;
+ goto err_rq_sge_list;
}
/* QP doorbell register address */
@@ -597,7 +622,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
&hr_qp->hr_buf, page_shift)) {
dev_err(dev, "hns_roce_buf_alloc error!\n");
ret = -ENOMEM;
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
@@ -679,6 +704,19 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
else
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+err_rq_sge_list:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ if (hr_qp->rq_inl_buf.wqe_list) {
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+ hr_qp->rq_inl_buf.wqe_list[0].sg_list = NULL;
+ }
+
+err_wqe_list:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ kfree(hr_qp->rq_inl_buf.wqe_list);
+ hr_qp->rq_inl_buf.wqe_list = NULL;
+ }
+
err_out:
return ret;
}
--
1.9.1
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH for-next 2/6] RDMA/hns: Update the usage of sr_max and rr_max field
[not found] ` <1514017342-91468-1-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-12-23 8:22 ` [PATCH for-next 1/6] RDMA/hns: Add rq inline data support for hip08 RoCE Lijun Ou
@ 2017-12-23 8:22 ` Lijun Ou
[not found] ` <1514017342-91468-3-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-12-23 8:22 ` [PATCH for-next 3/6] RDMA/hns: Set access flags of hip08 RoCE Lijun Ou
` (3 subsequent siblings)
5 siblings, 1 reply; 15+ messages in thread
From: Lijun Ou @ 2017-12-23 8:22 UTC (permalink / raw)
To: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc
Cc: leon-DgEjT+Ai2ygdnm+yROfE0A, linux-rdma-u79uwXL29TY76Z2rM5mHXA
This patch fixes the usage of the sr_max field and rr_max of qp
context when modifying qp. Its modifications include:
1. Adjust location of filling sr_max field of qpc
2. Only assign the number of responder resource if
IB_QP_MAX_DEST_RD_ATOMIC bit is set
3. Only assign the number of outstanding resource if
IB_QP_MAX_QP_RD_ATOMIC bit is set
4. Fix the assignment algorithm for the field of sr_max
and rr_max of qp context
Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
---
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 33 ++++++++++++++++++++----------
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index b17dcfa..092f133 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2531,11 +2531,17 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
}
- roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S,
- ilog2((unsigned int)attr->max_dest_rd_atomic));
- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S, 0);
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attr->max_dest_rd_atomic) {
+ roce_set_field(context->byte_140_raq,
+ V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S,
+ fls(attr->max_dest_rd_atomic - 1));
+ roce_set_field(qpc_mask->byte_140_raq,
+ V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S, 0);
+ }
+ }
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
@@ -2625,12 +2631,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_168_LP_SGEN_INI_M,
V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
- roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S,
- ilog2((unsigned int)attr->max_rd_atomic));
- roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S, 0);
-
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
@@ -2834,6 +2834,17 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic) {
+ roce_set_field(context->byte_208_irrl,
+ V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S,
+ fls(attr->max_rd_atomic - 1));
+ roce_set_field(qpc_mask->byte_208_irrl,
+ V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S, 0);
+ }
+ }
return 0;
}
--
1.9.1
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH for-next 3/6] RDMA/hns: Set access flags of hip08 RoCE
[not found] ` <1514017342-91468-1-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-12-23 8:22 ` [PATCH for-next 1/6] RDMA/hns: Add rq inline data support for hip08 RoCE Lijun Ou
2017-12-23 8:22 ` [PATCH for-next 2/6] RDMA/hns: Update the usage of sr_max and rr_max field Lijun Ou
@ 2017-12-23 8:22 ` Lijun Ou
2017-12-23 8:22 ` [PATCH for-next 4/6] RDMA/hns: Filter for zero length of sge in hip08 kernel mode Lijun Ou
` (2 subsequent siblings)
5 siblings, 0 replies; 15+ messages in thread
From: Lijun Ou @ 2017-12-23 8:22 UTC (permalink / raw)
To: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc
Cc: leon-DgEjT+Ai2ygdnm+yROfE0A, linux-rdma-u79uwXL29TY76Z2rM5mHXA
This patch refactors the code of setting access flags
for RDMA operation as well as adds the scene when
attr->max_dest_rd_atomic is zero.
Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
---
drivers/infiniband/hw/hns/hns_roce_device.h | 1 +
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 48 +++++++++++++++++++++--------
2 files changed, 37 insertions(+), 12 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 8d123d3..4afa070 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -483,6 +483,7 @@ struct hns_roce_qp {
u8 resp_depth;
u8 state;
u32 access_flags;
+ u32 atomic_rd_en;
u32 pkey_index;
void (*event)(struct hns_roce_qp *,
enum hns_roce_event);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 092f133..869e36f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1926,6 +1926,36 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
return ret;
}
+static void set_access_flags(struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask,
+ const struct ib_qp_attr *attr, int attr_mask)
+{
+ u8 dest_rd_atomic;
+ u32 access_flags;
+
+ dest_rd_atomic = !!(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
+ attr->max_dest_rd_atomic : hr_qp->resp_depth;
+
+ access_flags = !!(attr_mask & IB_QP_ACCESS_FLAGS) ?
+ attr->qp_access_flags : hr_qp->atomic_rd_en;
+
+ if (!dest_rd_atomic)
+ access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+}
+
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
struct hns_roce_v2_qp_context *context,
@@ -2011,18 +2041,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
-
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
@@ -2908,6 +2926,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
goto out;
}
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
/* Every status migrate must change state */
roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, new_state);
@@ -2924,6 +2945,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hr_qp->state = new_state;
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ hr_qp->atomic_rd_en = attr->qp_access_flags;
+
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
hr_qp->resp_depth = attr->max_dest_rd_atomic;
if (attr_mask & IB_QP_PORT) {
--
1.9.1
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH for-next 4/6] RDMA/hns: Filter for zero length of sge in hip08 kernel mode
[not found] ` <1514017342-91468-1-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
` (2 preceding siblings ...)
2017-12-23 8:22 ` [PATCH for-next 3/6] RDMA/hns: Set access flags of hip08 RoCE Lijun Ou
@ 2017-12-23 8:22 ` Lijun Ou
[not found] ` <1514017342-91468-5-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-12-23 8:22 ` [PATCH for-next 5/6] RDMA/hns: Fix QP state judgement before sending work requests Lijun Ou
2017-12-23 8:22 ` [PATCH for-next 6/6] RDMA/hns: Assign dest_qp when deregistering mr Lijun Ou
5 siblings, 1 reply; 15+ messages in thread
From: Lijun Ou @ 2017-12-23 8:22 UTC (permalink / raw)
To: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc
Cc: leon-DgEjT+Ai2ygdnm+yROfE0A, linux-rdma-u79uwXL29TY76Z2rM5mHXA
When the length of sge is zero, the driver needs to filter it out
Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
---
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 29 ++++++++++++++++++++---------
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 869e36f..a1d8ca0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -230,26 +230,37 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
} else {
if (wr->num_sge <= 2) {
- for (i = 0; i < wr->num_sge; i++)
- set_data_seg_v2(dseg + i,
- wr->sg_list + i);
+ for (i = 0; i < wr->num_sge; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg,
+ wr->sg_list + i);
+ dseg++;
+ }
+ }
} else {
roce_set_field(rc_sq_wqe->byte_20,
V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
sge_ind & (qp->sge.sge_cnt - 1));
- for (i = 0; i < 2; i++)
- set_data_seg_v2(dseg + i,
- wr->sg_list + i);
+ for (i = 0; i < 2; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg,
+ wr->sg_list + i);
+ dseg++;
+ }
+ }
dseg = get_send_extend_sge(qp,
sge_ind & (qp->sge.sge_cnt - 1));
for (i = 0; i < wr->num_sge - 2; i++) {
- set_data_seg_v2(dseg + i,
- wr->sg_list + 2 + i);
- sge_ind++;
+ if (likely(wr->sg_list[i + 2].length)) {
+ set_data_seg_v2(dseg,
+ wr->sg_list + 2 + i);
+ dseg++;
+ sge_ind++;
+ }
}
}
--
1.9.1
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH for-next 5/6] RDMA/hns: Fix QP state judgement before sending work requests
[not found] ` <1514017342-91468-1-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
` (3 preceding siblings ...)
2017-12-23 8:22 ` [PATCH for-next 4/6] RDMA/hns: Filter for zero length of sge in hip08 kernel mode Lijun Ou
@ 2017-12-23 8:22 ` Lijun Ou
2017-12-23 8:22 ` [PATCH for-next 6/6] RDMA/hns: Assign dest_qp when deregistering mr Lijun Ou
5 siblings, 0 replies; 15+ messages in thread
From: Lijun Ou @ 2017-12-23 8:22 UTC (permalink / raw)
To: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc
Cc: leon-DgEjT+Ai2ygdnm+yROfE0A, linux-rdma-u79uwXL29TY76Z2rM5mHXA
From: Yixian Liu <liuyixian-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
The QP can accept send work requests only when the QP is
in the states that allow them to be submitted.
This patch updates the QP state judgement based on the
specification.
Signed-off-by: Yixian Liu <liuyixian-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
Signed-off-by: Shaobo Xu <xushaobo2-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
---
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index a1d8ca0..849c333 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -76,7 +76,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return -EOPNOTSUPP;
}
- if (unlikely(qp->state != IB_QPS_RTS && qp->state != IB_QPS_SQD)) {
+ if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
+ qp->state == IB_QPS_RTR)) {
dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
*bad_wr = wr;
return -EINVAL;
--
1.9.1
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH for-next 6/6] RDMA/hns: Assign dest_qp when deregistering mr
[not found] ` <1514017342-91468-1-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
` (4 preceding siblings ...)
2017-12-23 8:22 ` [PATCH for-next 5/6] RDMA/hns: Fix QP state judgement before sending work requests Lijun Ou
@ 2017-12-23 8:22 ` Lijun Ou
5 siblings, 0 replies; 15+ messages in thread
From: Lijun Ou @ 2017-12-23 8:22 UTC (permalink / raw)
To: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc
Cc: leon-DgEjT+Ai2ygdnm+yROfE0A, linux-rdma-u79uwXL29TY76Z2rM5mHXA
It needs to create eight reserved QPs to resolve
a bug of hip06. When deregistering mr, it will issue
a rdma write for every reserve QPs.
In the above process, it needs to assign dest_qp.
Otherwise, it will trigger free mr work fail.
Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
---
drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 6100ace..490a1fc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -775,6 +775,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
goto create_lp_qp_failed;
}
+ attr_mask |= IB_QP_DEST_QPN;
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
IB_QPS_INIT, IB_QPS_RTR);
if (ret) {
--
1.9.1
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [PATCH for-next 4/6] RDMA/hns: Filter for zero length of sge in hip08 kernel mode
[not found] ` <1514017342-91468-5-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
@ 2017-12-25 9:00 ` Leon Romanovsky
[not found] ` <20171225090028.GY2942-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
0 siblings, 1 reply; 15+ messages in thread
From: Leon Romanovsky @ 2017-12-25 9:00 UTC (permalink / raw)
To: Lijun Ou
Cc: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc,
linux-rdma-u79uwXL29TY76Z2rM5mHXA
[-- Attachment #1: Type: text/plain, Size: 1211 bytes --]
On Sat, Dec 23, 2017 at 04:22:20PM +0800, Lijun Ou wrote:
> When the length of sge is zero, the driver need to filter it
>
> Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
> ---
> drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 29 ++++++++++++++++++++---------
> 1 file changed, 20 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> index 869e36f..a1d8ca0 100644
> --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> @@ -230,26 +230,37 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
> V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
> } else {
> if (wr->num_sge <= 2) {
> - for (i = 0; i < wr->num_sge; i++)
> - set_data_seg_v2(dseg + i,
> - wr->sg_list + i);
> + for (i = 0; i < wr->num_sge; i++) {
> + if (likely(wr->sg_list[i].length)) {
> + set_data_seg_v2(dseg,
> + wr->sg_list + i);
> + dseg++;
Actually, you don't need to advance dseg and can keep your "dseg + i" code as before.
I have a more general question, is this scenario (having length == 0) real?
Thanks
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 833 bytes --]
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH for-next 1/6] RDMA/hns: Add rq inline data support for hip08 RoCE
[not found] ` <1514017342-91468-2-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
@ 2017-12-25 9:34 ` Leon Romanovsky
[not found] ` <20171225093448.GZ2942-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
0 siblings, 1 reply; 15+ messages in thread
From: Leon Romanovsky @ 2017-12-25 9:34 UTC (permalink / raw)
To: Lijun Ou
Cc: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc,
linux-rdma-u79uwXL29TY76Z2rM5mHXA
[-- Attachment #1: Type: text/plain, Size: 10858 bytes --]
On Sat, Dec 23, 2017 at 04:22:17PM +0800, Lijun Ou wrote:
> This patch mainly implement rq inline data feature for hip08
> RoCE in kernel mode.
>
> Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
> ---
> drivers/infiniband/hw/hns/hns_roce_device.h | 18 +++++++++
> drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 63 ++++++++++++++++++++++++++++-
> drivers/infiniband/hw/hns/hns_roce_qp.c | 52 ++++++++++++++++++++----
> 3 files changed, 125 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
> index dcfd209..8d123d3 100644
> --- a/drivers/infiniband/hw/hns/hns_roce_device.h
> +++ b/drivers/infiniband/hw/hns/hns_roce_device.h
> @@ -178,6 +178,7 @@ enum {
> enum {
> HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
> HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
> + HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2)
> };
>
> enum hns_roce_mtt_type {
> @@ -446,6 +447,21 @@ struct hns_roce_cmd_mailbox {
>
> struct hns_roce_dev;
>
> +struct hns_roce_rinl_sge {
> + void *addr;
> + u32 len;
> +};
> +
> +struct hns_roce_rinl_wqe {
> + struct hns_roce_rinl_sge *sg_list;
> + u32 sge_cnt;
> +};
> +
> +struct hns_roce_rinl_buf {
> + struct hns_roce_rinl_wqe *wqe_list;
> + u32 wqe_cnt;
> +};
> +
> struct hns_roce_qp {
> struct ib_qp ibqp;
> struct hns_roce_buf hr_buf;
> @@ -477,6 +493,8 @@ struct hns_roce_qp {
>
> struct hns_roce_sge sge;
> u32 next_sge;
> +
> + struct hns_roce_rinl_buf rq_inl_buf;
> };
>
> struct hns_roce_sqp {
> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> index 4d3e976..b17dcfa 100644
> --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> @@ -299,6 +299,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
> struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
> struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
> struct hns_roce_v2_wqe_data_seg *dseg;
> + struct hns_roce_rinl_sge *sge_list;
> struct device *dev = hr_dev->dev;
> struct hns_roce_v2_db rq_db;
> unsigned long flags;
> @@ -347,6 +348,14 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
> dseg[i].addr = 0;
> }
>
> + /* rq support inline data */
> + sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
> + hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
> + for (i = 0; i < wr->num_sge; i++) {
> + sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
> + sge_list[i].len = wr->sg_list[i].length;
> + }
> +
> hr_qp->rq.wrid[ind] = wr->wr_id;
>
> ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
> @@ -961,7 +970,8 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
> caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
>
> caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
> - HNS_ROCE_CAP_FLAG_ROCE_V1_V2;
> + HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
> + HNS_ROCE_CAP_FLAG_RQ_INLINE;
> caps->pkey_table_len[0] = 1;
> caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
> caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
> @@ -1476,6 +1486,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
> static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
> struct hns_roce_qp **cur_qp, struct ib_wc *wc)
> {
> + struct hns_roce_rinl_sge *sge_list;
> struct hns_roce_dev *hr_dev;
> struct hns_roce_v2_cqe *cqe;
> struct hns_roce_qp *hr_qp;
> @@ -1673,6 +1684,46 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
> break;
> }
>
> + if ((wc->qp->qp_type == IB_QPT_RC ||
> + wc->qp->qp_type == IB_QPT_UC) &&
> + (opcode == HNS_ROCE_V2_OPCODE_SEND ||
> + opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
> + opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
> + (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
It is better to put this in a separate function, or an inline function if you are very worried about performance issues.
> + u32 wr_num, wr_cnt, sge_num, sge_cnt;
> + u32 data_len, size;
> + u8 *wqe_buf;
> +
> + wr_num = (u16)roce_get_field(cqe->byte_4,
> + V2_CQE_BYTE_4_WQE_INDX_M,
> + V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
> + wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
> +
> + sge_list =
> + (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
> + sge_num =
> + (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
Such level of indirection means that it is time to introduce temp variables.
> + wqe_buf = (u8 *)get_recv_wqe(*cur_qp, wr_cnt);
> + data_len = wc->byte_len;
> +
> + for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len);
> + sge_cnt++) {
> + size = sge_list[sge_cnt].len < data_len ?
> + sge_list[sge_cnt].len : data_len;
Use min(a, b) macro available in kernel headers.
> +
> + memcpy((void *)sge_list[sge_cnt].addr,
> + (void *)wqe_buf, size);
> +
> + data_len -= size;
> + wqe_buf += size;
> + }
> +
> + if (data_len) {
> + wc->status = IB_WC_LOC_LEN_ERR;
> + return -EAGAIN;
> + }
> + }
To be honest the whole new chunk is very strange, it has casting in
almost every line and looks very suspicious. Can you clean the
variables/function return values to avoid castings?
> +
> /* Update tail pointer, record wr_id */
> wq = &(*cur_qp)->rq;
> wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
> @@ -1972,6 +2023,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
> !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
> roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
>
> + roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
> roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
>
> roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
> @@ -3114,6 +3166,15 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
> hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
> }
>
> + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
> + if (hr_qp->rq_inl_buf.wqe_list) {
> + kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
> + kfree(hr_qp->rq_inl_buf.wqe_list);
> + hr_qp->rq_inl_buf.wqe_list[0].sg_list = NULL;
Did it work? You called to kfree the hr_qp->rq_inl_buf.wqe_list a line above?
> + hr_qp->rq_inl_buf.wqe_list = NULL;
> + }
> + }
> +
> return 0;
> }
>
> diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
> index 69e2584..7c75f21b 100644
> --- a/drivers/infiniband/hw/hns/hns_roce_qp.c
> +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
> @@ -494,6 +494,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
> int ret = 0;
> u32 page_shift;
> u32 npages;
> + int i;
>
> mutex_init(&hr_qp->mutex);
> spin_lock_init(&hr_qp->sq.lock);
> @@ -513,18 +514,42 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
> goto err_out;
> }
>
> + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
> + /* allocate recv inline buf */
> + hr_qp->rq_inl_buf.wqe_list = kmalloc_array(hr_qp->rq.wqe_cnt,
> + sizeof(struct hns_roce_rinl_wqe),
> + GFP_KERNEL);
> + if (!hr_qp->rq_inl_buf.wqe_list)
> + goto err_out;
> +
> + hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;
> +
> + hr_qp->rq_inl_buf.wqe_list[0].sg_list =
> + kmalloc_array(hr_qp->rq_inl_buf.wqe_cnt *
> + init_attr->cap.max_recv_sge,
> + sizeof(struct hns_roce_rinl_sge),
> + GFP_KERNEL);
> + if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list)
> + goto err_wqe_list;
> +
> + for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
> + hr_qp->rq_inl_buf.wqe_list[i].sg_list =
> + &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
> + init_attr->cap.max_recv_sge];
> + }
I'm not an expert in this area of code, but the call to kmalloc_array on
the first field of already kmalloc_array created pointer looks strange.
Don't know what you can do with that comment, maybe other reviewers can suggest.
> +
> if (ib_pd->uobject) {
> if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
> dev_err(dev, "ib_copy_from_udata error for create qp\n");
> ret = -EFAULT;
> - goto err_out;
> + goto err_rq_sge_list;
> }
>
> ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
> &ucmd);
> if (ret) {
> dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
> - goto err_out;
> + goto err_rq_sge_list;
> }
>
> hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
> @@ -533,7 +558,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
> if (IS_ERR(hr_qp->umem)) {
> dev_err(dev, "ib_umem_get error for create qp\n");
> ret = PTR_ERR(hr_qp->umem);
> - goto err_out;
> + goto err_rq_sge_list;
> }
>
> hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
> @@ -567,13 +592,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
> IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
> dev_err(dev, "init_attr->create_flags error!\n");
> ret = -EINVAL;
> - goto err_out;
> + goto err_rq_sge_list;
> }
>
> if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
> dev_err(dev, "init_attr->create_flags error!\n");
> ret = -EINVAL;
> - goto err_out;
> + goto err_rq_sge_list;
> }
>
> /* Set SQ size */
> @@ -581,7 +606,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
> hr_qp);
> if (ret) {
> dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
> - goto err_out;
> + goto err_rq_sge_list;
> }
>
> /* QP doorbell register address */
> @@ -597,7 +622,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
> &hr_qp->hr_buf, page_shift)) {
> dev_err(dev, "hns_roce_buf_alloc error!\n");
> ret = -ENOMEM;
> - goto err_out;
> + goto err_rq_sge_list;
> }
>
> hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
> @@ -679,6 +704,19 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
> else
> hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
>
> +err_rq_sge_list:
> + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
> + if (hr_qp->rq_inl_buf.wqe_list) {
> + kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
> + hr_qp->rq_inl_buf.wqe_list[0].sg_list = NULL;
IMHO, in general case, it is preferable to avoid setting NULL after kfree.
> + }
> +
> +err_wqe_list:
> + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
> + kfree(hr_qp->rq_inl_buf.wqe_list);
> + hr_qp->rq_inl_buf.wqe_list = NULL;
> + }
> +
> err_out:
> return ret;
> }
> --
> 1.9.1
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
> the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 833 bytes --]
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH for-next 2/6] RDMA/hns: Update the usage of sr_max and rr_max field
[not found] ` <1514017342-91468-3-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
@ 2017-12-25 9:41 ` Leon Romanovsky
[not found] ` <20171225094112.GA2942-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
0 siblings, 1 reply; 15+ messages in thread
From: Leon Romanovsky @ 2017-12-25 9:41 UTC (permalink / raw)
To: Lijun Ou
Cc: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc,
linux-rdma-u79uwXL29TY76Z2rM5mHXA
[-- Attachment #1: Type: text/plain, Size: 3482 bytes --]
On Sat, Dec 23, 2017 at 04:22:18PM +0800, Lijun Ou wrote:
> This patch fixes the usage with sr_max filed and rr_max of qp
> context when modify qp. Its modifications include:
> 1. Adjust location of filling sr_max filed of qpc
> 2. Only assign the number of responder resource if
> IB_QP_MAX_DEST_RD_ATOMIC bit is set
> 3. Only assign the number of outstanding resource if
> IB_QP_MAX_QP_RD_ATOMIC
> 4. Fix the assgin algorithms for the field of sr_max
> and rr_max of qp context
>
> Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
> ---
> drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 33 ++++++++++++++++++++----------
> 1 file changed, 22 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> index b17dcfa..092f133 100644
> --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> @@ -2531,11 +2531,17 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
> roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
> }
>
> - roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
> - V2_QPC_BYTE_140_RR_MAX_S,
> - ilog2((unsigned int)attr->max_dest_rd_atomic));
> - roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
> - V2_QPC_BYTE_140_RR_MAX_S, 0);
> + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
> + if (attr->max_dest_rd_atomic) {
if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) && attr->max_dest_rd_atomic) {
> + roce_set_field(context->byte_140_raq,
> + V2_QPC_BYTE_140_RR_MAX_M,
> + V2_QPC_BYTE_140_RR_MAX_S,
> + fls(attr->max_dest_rd_atomic - 1));
> + roce_set_field(qpc_mask->byte_140_raq,
> + V2_QPC_BYTE_140_RR_MAX_M,
> + V2_QPC_BYTE_140_RR_MAX_S, 0);
> + }
> + }
>
> roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
> V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
> @@ -2625,12 +2631,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
> V2_QPC_BYTE_168_LP_SGEN_INI_M,
> V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
>
> - roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
> - V2_QPC_BYTE_208_SR_MAX_S,
> - ilog2((unsigned int)attr->max_rd_atomic));
> - roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
> - V2_QPC_BYTE_208_SR_MAX_S, 0);
> -
> roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
> V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
> roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
> @@ -2834,6 +2834,17 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
> roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
> V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
>
> + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
> + if (attr->max_rd_atomic) {
> + roce_set_field(context->byte_208_irrl,
> + V2_QPC_BYTE_208_SR_MAX_M,
> + V2_QPC_BYTE_208_SR_MAX_S,
> + fls(attr->max_rd_atomic - 1));
> + roce_set_field(qpc_mask->byte_208_irrl,
> + V2_QPC_BYTE_208_SR_MAX_M,
> + V2_QPC_BYTE_208_SR_MAX_S, 0);
> + }
> + }
> return 0;
> }
>
> --
> 1.9.1
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
> the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 833 bytes --]
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH for-next 4/6] RDMA/hns: Filter for zero length of sge in hip08 kernel mode
[not found] ` <20171225090028.GY2942-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
@ 2017-12-25 10:08 ` oulijun
[not found] ` <42e22cc6-f385-b6f4-bd14-1450ae19519a-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
0 siblings, 1 reply; 15+ messages in thread
From: oulijun @ 2017-12-25 10:08 UTC (permalink / raw)
To: Leon Romanovsky
Cc: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc,
linux-rdma-u79uwXL29TY76Z2rM5mHXA
在 2017/12/25 17:00, Leon Romanovsky 写道:
> On Sat, Dec 23, 2017 at 04:22:20PM +0800, Lijun Ou wrote:
>> When the length of sge is zero, the driver need to filter it
>>
>> Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
>> ---
>> drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 29 ++++++++++++++++++++---------
>> 1 file changed, 20 insertions(+), 9 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> index 869e36f..a1d8ca0 100644
>> --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> @@ -230,26 +230,37 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
>> V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
>> } else {
>> if (wr->num_sge <= 2) {
>> - for (i = 0; i < wr->num_sge; i++)
>> - set_data_seg_v2(dseg + i,
>> - wr->sg_list + i);
>> + for (i = 0; i < wr->num_sge; i++) {
>> + if (likely(wr->sg_list[i].length)) {
>> + set_data_seg_v2(dseg,
>> + wr->sg_list + i);
>> + dseg++;
>
> Actually, you don't need to advance desg and keep your "desg + i" code as before.
>
> I have a more general question, is this scenario (having length == 0) real?
>
> Thanks
>
Yes. The original plan for handling a zero-length sge was that the hardware would recognize
and filter it, but the final plan is that the software filters it, which gives improved performance.
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH for-next 2/6] RDMA/hns: Update the usage of sr_max and rr_max field
[not found] ` <20171225094112.GA2942-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
@ 2017-12-25 10:14 ` oulijun
0 siblings, 0 replies; 15+ messages in thread
From: oulijun @ 2017-12-25 10:14 UTC (permalink / raw)
To: Leon Romanovsky
Cc: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc,
linux-rdma-u79uwXL29TY76Z2rM5mHXA
在 2017/12/25 17:41, Leon Romanovsky 写道:
> On Sat, Dec 23, 2017 at 04:22:18PM +0800, Lijun Ou wrote:
>> This patch fixes the usage with sr_max filed and rr_max of qp
>> context when modify qp. Its modifications include:
>> 1. Adjust location of filling sr_max filed of qpc
>> 2. Only assign the number of responder resource if
>> IB_QP_MAX_DEST_RD_ATOMIC bit is set
>> 3. Only assign the number of outstanding resource if
>> IB_QP_MAX_QP_RD_ATOMIC
>> 4. Fix the assgin algorithms for the field of sr_max
>> and rr_max of qp context
>>
>> Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
>> ---
>> drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 33 ++++++++++++++++++++----------
>> 1 file changed, 22 insertions(+), 11 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> index b17dcfa..092f133 100644
>> --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> @@ -2531,11 +2531,17 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
>> roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
>> }
>>
>> - roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
>> - V2_QPC_BYTE_140_RR_MAX_S,
>> - ilog2((unsigned int)attr->max_dest_rd_atomic));
>> - roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
>> - V2_QPC_BYTE_140_RR_MAX_S, 0);
>> + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
>> + if (attr->max_dest_rd_atomic) {
>
> if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) && attr->max_dest_rd_atomic) {
>
Good advice, thanks. I will fix it.
>
>
>> + roce_set_field(context->byte_140_raq,
>> + V2_QPC_BYTE_140_RR_MAX_M,
>> + V2_QPC_BYTE_140_RR_MAX_S,
>> + fls(attr->max_dest_rd_atomic - 1));
>> + roce_set_field(qpc_mask->byte_140_raq,
>> + V2_QPC_BYTE_140_RR_MAX_M,
>> + V2_QPC_BYTE_140_RR_MAX_S, 0);
>> + }
>> + }
>>
>> roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
>> V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
>> @@ -2625,12 +2631,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
>> V2_QPC_BYTE_168_LP_SGEN_INI_M,
>> V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
>>
>> - roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
>> - V2_QPC_BYTE_208_SR_MAX_S,
>> - ilog2((unsigned int)attr->max_rd_atomic));
>> - roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
>> - V2_QPC_BYTE_208_SR_MAX_S, 0);
>> -
>> roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
>> V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
>> roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
>> @@ -2834,6 +2834,17 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
>> roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
>> V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
>>
>> + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
>> + if (attr->max_rd_atomic) {
>> + roce_set_field(context->byte_208_irrl,
>> + V2_QPC_BYTE_208_SR_MAX_M,
>> + V2_QPC_BYTE_208_SR_MAX_S,
>> + fls(attr->max_rd_atomic - 1));
>> + roce_set_field(qpc_mask->byte_208_irrl,
>> + V2_QPC_BYTE_208_SR_MAX_M,
>> + V2_QPC_BYTE_208_SR_MAX_S, 0);
>> + }
>> + }
>> return 0;
>> }
>>
>> --
>> 1.9.1
>>
>> --
>> To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
>> the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
>> More majordomo info at http://vger.kernel.org/majordomo-info.html
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH for-next 4/6] RDMA/hns: Filter for zero length of sge in hip08 kernel mode
[not found] ` <42e22cc6-f385-b6f4-bd14-1450ae19519a-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
@ 2017-12-25 10:20 ` Leon Romanovsky
[not found] ` <20171225102045.GC2942-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
0 siblings, 1 reply; 15+ messages in thread
From: Leon Romanovsky @ 2017-12-25 10:20 UTC (permalink / raw)
To: oulijun
Cc: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc,
linux-rdma-u79uwXL29TY76Z2rM5mHXA
[-- Attachment #1: Type: text/plain, Size: 1759 bytes --]
On Mon, Dec 25, 2017 at 06:08:08PM +0800, oulijun wrote:
> 在 2017/12/25 17:00, Leon Romanovsky 写道:
> > On Sat, Dec 23, 2017 at 04:22:20PM +0800, Lijun Ou wrote:
> >> When the length of sge is zero, the driver need to filter it
> >>
> >> Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
> >> ---
> >> drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 29 ++++++++++++++++++++---------
> >> 1 file changed, 20 insertions(+), 9 deletions(-)
> >>
> >> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> >> index 869e36f..a1d8ca0 100644
> >> --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> >> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> >> @@ -230,26 +230,37 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
> >> V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
> >> } else {
> >> if (wr->num_sge <= 2) {
> >> - for (i = 0; i < wr->num_sge; i++)
> >> - set_data_seg_v2(dseg + i,
> >> - wr->sg_list + i);
> >> + for (i = 0; i < wr->num_sge; i++) {
> >> + if (likely(wr->sg_list[i].length)) {
> >> + set_data_seg_v2(dseg,
> >> + wr->sg_list + i);
> >> + dseg++;
> >
> > Actually, you don't need to advance desg and keep your "desg + i" code as before.
> >
> > I have a more general question, is this scenario (having length == 0) real?
> >
> > Thanks
> >
> Yes, The origin plan for processing the length of sge is zero is that the hardware recognizes
> and filter it. but the final plan is that the software filter it and have an improved performance.
>
My question was an attempt to understand whether the kernel can create an sg_list with a length equal to zero.
Thanks
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 833 bytes --]
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH for-next 1/6] RDMA/hns: Add rq inline data support for hip08 RoCE
[not found] ` <20171225093448.GZ2942-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
@ 2017-12-26 9:04 ` oulijun
0 siblings, 0 replies; 15+ messages in thread
From: oulijun @ 2017-12-26 9:04 UTC (permalink / raw)
To: Leon Romanovsky
Cc: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc,
linux-rdma-u79uwXL29TY76Z2rM5mHXA
在 2017/12/25 17:34, Leon Romanovsky 写道:
> On Sat, Dec 23, 2017 at 04:22:17PM +0800, Lijun Ou wrote:
>> This patch mainly implement rq inline data feature for hip08
>> RoCE in kernel mode.
>>
>> Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
>> ---
>> drivers/infiniband/hw/hns/hns_roce_device.h | 18 +++++++++
>> drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 63 ++++++++++++++++++++++++++++-
>> drivers/infiniband/hw/hns/hns_roce_qp.c | 52 ++++++++++++++++++++----
>> 3 files changed, 125 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
>> index dcfd209..8d123d3 100644
>> --- a/drivers/infiniband/hw/hns/hns_roce_device.h
>> +++ b/drivers/infiniband/hw/hns/hns_roce_device.h
>> @@ -178,6 +178,7 @@ enum {
>> enum {
>> HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
>> HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
>> + HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2)
>> };
>>
>> enum hns_roce_mtt_type {
>> @@ -446,6 +447,21 @@ struct hns_roce_cmd_mailbox {
>>
>> struct hns_roce_dev;
>>
>> +struct hns_roce_rinl_sge {
>> + void *addr;
>> + u32 len;
>> +};
>> +
>> +struct hns_roce_rinl_wqe {
>> + struct hns_roce_rinl_sge *sg_list;
>> + u32 sge_cnt;
>> +};
>> +
>> +struct hns_roce_rinl_buf {
>> + struct hns_roce_rinl_wqe *wqe_list;
>> + u32 wqe_cnt;
>> +};
>> +
>> struct hns_roce_qp {
>> struct ib_qp ibqp;
>> struct hns_roce_buf hr_buf;
>> @@ -477,6 +493,8 @@ struct hns_roce_qp {
>>
>> struct hns_roce_sge sge;
>> u32 next_sge;
>> +
>> + struct hns_roce_rinl_buf rq_inl_buf;
>> };
>>
>> struct hns_roce_sqp {
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> index 4d3e976..b17dcfa 100644
>> --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> @@ -299,6 +299,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
>> struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
>> struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
>> struct hns_roce_v2_wqe_data_seg *dseg;
>> + struct hns_roce_rinl_sge *sge_list;
>> struct device *dev = hr_dev->dev;
>> struct hns_roce_v2_db rq_db;
>> unsigned long flags;
>> @@ -347,6 +348,14 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
>> dseg[i].addr = 0;
>> }
>>
>> + /* rq support inline data */
>> + sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
>> + hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
>> + for (i = 0; i < wr->num_sge; i++) {
>> + sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
>> + sge_list[i].len = wr->sg_list[i].length;
>> + }
>> +
>> hr_qp->rq.wrid[ind] = wr->wr_id;
>>
>> ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
>> @@ -961,7 +970,8 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
>> caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
>>
>> caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
>> - HNS_ROCE_CAP_FLAG_ROCE_V1_V2;
>> + HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
>> + HNS_ROCE_CAP_FLAG_RQ_INLINE;
>> caps->pkey_table_len[0] = 1;
>> caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
>> caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
>> @@ -1476,6 +1486,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
>> static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
>> struct hns_roce_qp **cur_qp, struct ib_wc *wc)
>> {
>> + struct hns_roce_rinl_sge *sge_list;
>> struct hns_roce_dev *hr_dev;
>> struct hns_roce_v2_cqe *cqe;
>> struct hns_roce_qp *hr_qp;
>> @@ -1673,6 +1684,46 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
>> break;
>> }
>>
>> + if ((wc->qp->qp_type == IB_QPT_RC ||
>> + wc->qp->qp_type == IB_QPT_UC) &&
>> + (opcode == HNS_ROCE_V2_OPCODE_SEND ||
>> + opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
>> + opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
>> + (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
>
> It is better to put in separate function or inline function if you very worried about performance issues.
>
Yes, I will refactor it as function.
>> + u32 wr_num, wr_cnt, sge_num, sge_cnt;
>> + u32 data_len, size;
>> + u8 *wqe_buf;
>> +
>> + wr_num = (u16)roce_get_field(cqe->byte_4,
>> + V2_CQE_BYTE_4_WQE_INDX_M,
>> + V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
>> + wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
>> +
>> + sge_list =
>> + (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
>> + sge_num =
>> + (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
>
> Such level of indirection means that it is time to introduce temp variables.
>
Maybe; it should be ok after refactoring it.
>> + wqe_buf = (u8 *)get_recv_wqe(*cur_qp, wr_cnt);
>> + data_len = wc->byte_len;
>> +
>> + for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len);
>> + sge_cnt++) {
>> + size = sge_list[sge_cnt].len < data_len ?
>> + sge_list[sge_cnt].len : data_len;
>
> Use min(a, b) macro available in kernel headers.
yes, thanks.
>
>> +
>> + memcpy((void *)sge_list[sge_cnt].addr,
>> + (void *)wqe_buf, size);
>> +
>> + data_len -= size;
>> + wqe_buf += size;
>> + }
>> +
>> + if (data_len) {
>> + wc->status = IB_WC_LOC_LEN_ERR;
>> + return -EAGAIN;
>> + }
>> + }
>
> To be honest the whole new chunk is very strange, it has casting in
> almost every line and looks very suspicious. Can you clean the
> variables/function return values to avoid castings?
>
>> +
>> /* Update tail pointer, record wr_id */
>> wq = &(*cur_qp)->rq;
>> wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
>> @@ -1972,6 +2023,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
>> !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
>> roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
>>
>> + roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
>> roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
>>
>> roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
>> @@ -3114,6 +3166,15 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
>> hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
>> }
>>
>> + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
>> + if (hr_qp->rq_inl_buf.wqe_list) {
>> + kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
>> + kfree(hr_qp->rq_inl_buf.wqe_list);
>> + hr_qp->rq_inl_buf.wqe_list[0].sg_list = NULL;
>
> Did it work? You called to kfree the hr_qp->rq_inl_buf.wqe_list a line above?
>
>> + hr_qp->rq_inl_buf.wqe_list = NULL;
>> + }
>> + }
>> +
>> return 0;
>> }
>>
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
>> index 69e2584..7c75f21b 100644
>> --- a/drivers/infiniband/hw/hns/hns_roce_qp.c
>> +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
>> @@ -494,6 +494,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
>> int ret = 0;
>> u32 page_shift;
>> u32 npages;
>> + int i;
>>
>> mutex_init(&hr_qp->mutex);
>> spin_lock_init(&hr_qp->sq.lock);
>> @@ -513,18 +514,42 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
>> goto err_out;
>> }
>>
>> + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
>> + /* allocate recv inline buf */
>> + hr_qp->rq_inl_buf.wqe_list = kmalloc_array(hr_qp->rq.wqe_cnt,
>> + sizeof(struct hns_roce_rinl_wqe),
>> + GFP_KERNEL);
>> + if (!hr_qp->rq_inl_buf.wqe_list)
>> + goto err_out;
>> +
>> + hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;
>> +
>> + hr_qp->rq_inl_buf.wqe_list[0].sg_list =
>> + kmalloc_array(hr_qp->rq_inl_buf.wqe_cnt *
>> + init_attr->cap.max_recv_sge,
>> + sizeof(struct hns_roce_rinl_sge),
>> + GFP_KERNEL);
>> + if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list)
>> + goto err_wqe_list;
>> +
>> + for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
>> + hr_qp->rq_inl_buf.wqe_list[i].sg_list =
>> + &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
>> + init_attr->cap.max_recv_sge];
>> + }
>
> I'm not an expert in this area of code, but the call to kmalloc_array on
> the first field of already kmalloc_array created pointer looks strange.
>
> Don't know what you can do with that comment, maybe other reviewers can suggest.
>
>> +
>> if (ib_pd->uobject) {
>> if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
>> dev_err(dev, "ib_copy_from_udata error for create qp\n");
>> ret = -EFAULT;
>> - goto err_out;
>> + goto err_rq_sge_list;
>> }
>>
>> ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
>> &ucmd);
>> if (ret) {
>> dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
>> - goto err_out;
>> + goto err_rq_sge_list;
>> }
>>
>> hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
>> @@ -533,7 +558,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
>> if (IS_ERR(hr_qp->umem)) {
>> dev_err(dev, "ib_umem_get error for create qp\n");
>> ret = PTR_ERR(hr_qp->umem);
>> - goto err_out;
>> + goto err_rq_sge_list;
>> }
>>
>> hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
>> @@ -567,13 +592,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
>> IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
>> dev_err(dev, "init_attr->create_flags error!\n");
>> ret = -EINVAL;
>> - goto err_out;
>> + goto err_rq_sge_list;
>> }
>>
>> if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
>> dev_err(dev, "init_attr->create_flags error!\n");
>> ret = -EINVAL;
>> - goto err_out;
>> + goto err_rq_sge_list;
>> }
>>
>> /* Set SQ size */
>> @@ -581,7 +606,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
>> hr_qp);
>> if (ret) {
>> dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
>> - goto err_out;
>> + goto err_rq_sge_list;
>> }
>>
>> /* QP doorbell register address */
>> @@ -597,7 +622,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
>> &hr_qp->hr_buf, page_shift)) {
>> dev_err(dev, "hns_roce_buf_alloc error!\n");
>> ret = -ENOMEM;
>> - goto err_out;
>> + goto err_rq_sge_list;
>> }
>>
>> hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
>> @@ -679,6 +704,19 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
>> else
>> hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
>>
>> +err_rq_sge_list:
>> + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
>> + if (hr_qp->rq_inl_buf.wqe_list) {
>> + kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
>> + hr_qp->rq_inl_buf.wqe_list[0].sg_list = NULL;
>
> IMHO, in general case, it is preferable to avoid setting NULL after kfree.
>
>> + }
>> +
>> +err_wqe_list:
>> + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
>> + kfree(hr_qp->rq_inl_buf.wqe_list);
>> + hr_qp->rq_inl_buf.wqe_list = NULL;
>> + }
>> +
>> err_out:
>> return ret;
>> }
>> --
>> 1.9.1
>>
Thanks for your review. I will send the next patch to fix them.
>> --
>> To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
>> the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
>> More majordomo info at http://vger.kernel.org/majordomo-info.html
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH for-next 4/6] RDMA/hns: Filter for zero length of sge in hip08 kernel mode
[not found] ` <20171225102045.GC2942-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
@ 2017-12-26 9:19 ` oulijun
0 siblings, 0 replies; 15+ messages in thread
From: oulijun @ 2017-12-26 9:19 UTC (permalink / raw)
To: Leon Romanovsky
Cc: dledford-H+wXaHxf7aLQT0dZR+AlfA, jgg-uk2M96/98Pc,
linux-rdma-u79uwXL29TY76Z2rM5mHXA
在 2017/12/25 18:20, Leon Romanovsky 写道:
> On Mon, Dec 25, 2017 at 06:08:08PM +0800, oulijun wrote:
>> 在 2017/12/25 17:00, Leon Romanovsky 写道:
>>> On Sat, Dec 23, 2017 at 04:22:20PM +0800, Lijun Ou wrote:
>>>> When the length of sge is zero, the driver needs to filter it
>>>>
>>>> Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
>>>> ---
>>>> drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 29 ++++++++++++++++++++---------
>>>> 1 file changed, 20 insertions(+), 9 deletions(-)
>>>>
>>>> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>>>> index 869e36f..a1d8ca0 100644
>>>> --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>>>> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>>>> @@ -230,26 +230,37 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
>>>> V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
>>>> } else {
>>>> if (wr->num_sge <= 2) {
>>>> - for (i = 0; i < wr->num_sge; i++)
>>>> - set_data_seg_v2(dseg + i,
>>>> - wr->sg_list + i);
>>>> + for (i = 0; i < wr->num_sge; i++) {
>>>> + if (likely(wr->sg_list[i].length)) {
>>>> + set_data_seg_v2(dseg,
>>>> + wr->sg_list + i);
>>>> + dseg++;
>>>
>>> Actually, you don't need to advance desg and keep your "desg + i" code as before.
>>>
>>> I have a more general question, is this scenario (having length == 0) real?
>>>
>>> Thanks
>>>
>> Yes. The original plan for handling an sge of zero length was that the hardware recognizes
>> and filters it, but the final plan is that the software filters it, which gives improved performance.
>>
>
> My question was an attempt to understand if kernel can create sg_list with length equal to zero.
> Thanks
>
Yes, I think it can. Also, we need to check it similarly to other invalid parameters.
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 15+ messages in thread
end of thread, other threads:[~2017-12-26 9:19 UTC | newest]
Thread overview: 15+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2017-12-23 8:22 [PATCH for-next 0/6] Add rq inline and bugfixes for hns Lijun Ou
[not found] ` <1514017342-91468-1-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-12-23 8:22 ` [PATCH for-next 1/6] RDMA/hns: Add rq inline data support for hip08 RoCE Lijun Ou
[not found] ` <1514017342-91468-2-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-12-25 9:34 ` Leon Romanovsky
[not found] ` <20171225093448.GZ2942-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-12-26 9:04 ` oulijun
2017-12-23 8:22 ` [PATCH for-next 2/6] RDMA/hns: Update the usage of sr_max and rr_max field Lijun Ou
[not found] ` <1514017342-91468-3-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-12-25 9:41 ` Leon Romanovsky
[not found] ` <20171225094112.GA2942-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-12-25 10:14 ` oulijun
2017-12-23 8:22 ` [PATCH for-next 3/6] RDMA/hns: Set access flags of hip08 RoCE Lijun Ou
2017-12-23 8:22 ` [PATCH for-next 4/6] RDMA/hns: Filter for zero length of sge in hip08 kernel mode Lijun Ou
[not found] ` <1514017342-91468-5-git-send-email-oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-12-25 9:00 ` Leon Romanovsky
[not found] ` <20171225090028.GY2942-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-12-25 10:08 ` oulijun
[not found] ` <42e22cc6-f385-b6f4-bd14-1450ae19519a-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-12-25 10:20 ` Leon Romanovsky
[not found] ` <20171225102045.GC2942-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-12-26 9:19 ` oulijun
2017-12-23 8:22 ` [PATCH for-next 5/6] RDMA/hns: Fix QP state judgement before sending work requests Lijun Ou
2017-12-23 8:22 ` [PATCH for-next 6/6] RDMA/hns: Assign dest_qp when deregistering mr Lijun Ou
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox