* [PATCH for-next 1/6] RDMA/hns: Optimize hns_roce_config_link_table()
From: Weihang Li @ 2020-04-13 6:40 UTC
To: dledford, jgg; +Cc: leon, linux-rdma, linuxarm
From: Lijun Ou <oulijun@huawei.com>
Remove the unnecessary memset operations and adjust the style of some lines
in hns_roce_config_link_table().
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
---
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 53 ++++++++++++------------------
1 file changed, 21 insertions(+), 32 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index c331667..a580de9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2040,8 +2040,6 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
page_num = link_tbl->npages;
entry = link_tbl->table.buf;
- memset(req_a, 0, sizeof(*req_a));
- memset(req_b, 0, sizeof(*req_b));
for (i = 0; i < 2; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
@@ -2050,39 +2048,30 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
-
- if (i == 0) {
- req_a->base_addr_l =
- cpu_to_le32(link_tbl->table.map & 0xffffffff);
- req_a->base_addr_h =
- cpu_to_le32(link_tbl->table.map >> 32);
- roce_set_field(req_a->depth_pgsz_init_en,
- CFG_LLM_QUE_DEPTH_M, CFG_LLM_QUE_DEPTH_S,
- link_tbl->npages);
- roce_set_field(req_a->depth_pgsz_init_en,
- CFG_LLM_QUE_PGSZ_M, CFG_LLM_QUE_PGSZ_S,
- link_tbl->pg_sz);
- req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
- req_a->head_ba_h_nxtptr =
- cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
- roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M,
- CFG_LLM_HEAD_PTR_S, 0);
- } else {
- req_b->tail_ba_l =
- cpu_to_le32(entry[page_num - 1].blk_ba0);
- roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
- CFG_LLM_TAIL_BA_H_S,
- entry[page_num - 1].blk_ba1_nxt_ptr &
- HNS_ROCE_LINK_TABLE_BA1_M);
- roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M,
- CFG_LLM_TAIL_PTR_S,
- (entry[page_num - 2].blk_ba1_nxt_ptr &
- HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
- HNS_ROCE_LINK_TABLE_NXT_PTR_S);
- }
}
+
+ req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & 0xffffffff);
+ req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
+ roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_DEPTH_M,
+ CFG_LLM_QUE_DEPTH_S, link_tbl->npages);
+ roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_PGSZ_M,
+ CFG_LLM_QUE_PGSZ_S, link_tbl->pg_sz);
roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
CFG_LLM_INIT_EN_S, 1);
+ req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
+ req_a->head_ba_h_nxtptr = cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
+ roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M, CFG_LLM_HEAD_PTR_S,
+ 0);
+
+ req_b->tail_ba_l = cpu_to_le32(entry[page_num - 1].blk_ba0);
+ roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
+ CFG_LLM_TAIL_BA_H_S,
+ entry[page_num - 1].blk_ba1_nxt_ptr &
+ HNS_ROCE_LINK_TABLE_BA1_M);
+ roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M, CFG_LLM_TAIL_PTR_S,
+ (entry[page_num - 2].blk_ba1_nxt_ptr &
+ HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
+ HNS_ROCE_LINK_TABLE_NXT_PTR_S);
return hns_roce_cmq_send(hr_dev, desc, 2);
}
--
2.8.1
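A note on why the memsets can go: hns_roce_cmq_setup_basic_desc() clears the
whole descriptor before filling in the opcode, and req_a/req_b point into the
descriptors' data area, so zeroing them separately was redundant work. Below
is a minimal standalone sketch of that relationship; the types, names and
opcode value are simplified stand-ins, not the driver's real definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct hns_roce_cmq_desc. */
struct cmq_desc {
	uint16_t opcode;
	uint16_t flag;
	uint32_t data[6];		/* request structs alias this area */
};

/* Stand-in for hns_roce_cmq_setup_basic_desc(): it zeroes the
 * descriptor before setting the opcode. */
static void setup_basic_desc(struct cmq_desc *desc, uint16_t opcode)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = opcode;
}

int main(void)
{
	struct cmq_desc desc[2];
	uint32_t *req_a = desc[0].data;	/* aliases desc[0]'s payload */

	setup_basic_desc(&desc[0], 0x2100);	/* hypothetical opcode */
	/* A memset(req_a, 0, ...) here would clear memory that
	 * setup_basic_desc() already zeroed. */
	printf("%u\n", req_a[0]);		/* prints 0 */
	return 0;
}

The same reasoning covers the memset dropped from hns_roce_v2_set_mac() in
patch 2 below.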
* [PATCH for-next 2/6] RDMA/hns: Optimize hns_roce_v2_set_mac()
From: Weihang Li @ 2020-04-13 6:40 UTC
To: dledford, jgg; +Cc: leon, linux-rdma, linuxarm
From: Lijun Ou <oulijun@huawei.com>
Remove the unnecessary memset operation and adjust the style of some lines
in hns_roce_v2_set_mac().
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
---
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index a580de9..6816278 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2427,12 +2427,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
reg_smac_l = *(u32 *)(&addr[0]);
reg_smac_h = *(u16 *)(&addr[4]);
- memset(smac_tb, 0, sizeof(*smac_tb));
- roce_set_field(smac_tb->tb_idx_rsv,
- CFG_SMAC_TB_IDX_M,
+ roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
CFG_SMAC_TB_IDX_S, phy_port);
- roce_set_field(smac_tb->vf_smac_h_rsv,
- CFG_SMAC_TB_VF_SMAC_H_M,
+ roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
--
2.8.1
* [PATCH for-next 3/6] RDMA/hns: Simplify the qp state convert code
From: Weihang Li @ 2020-04-13 6:40 UTC
To: dledford, jgg; +Cc: leon, linux-rdma, linuxarm
From: Lang Cheng <chenglang@huawei.com>
Use a type map table to reduce the cyclomatic complexity.
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
---
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 6816278..e938bd8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -4540,19 +4540,20 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
return ret;
}
-static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
-{
- switch (state) {
- case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
- case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
- case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
- case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
- case HNS_ROCE_QP_ST_SQ_DRAINING:
- case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
- case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
- case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
- default: return -1;
- }
+static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
+{
+ const enum ib_qp_state map[] = {
+ [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
+ [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
+ [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
+ [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
+ [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
+ [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
+ [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
+ [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
+ };
+
+ return (state < ARRAY_SIZE(map)) ? map[state] : -1;
}
static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
--
2.8.1
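The shape of the conversion, shown standalone: a designated-initializer array
indexed by the hardware state, with a range check replacing the switch's
default case. The enums below are stand-ins for the driver's
hns_roce_v2_qp_state and ib_qp_state, and the table is written as static
const here, per the review that follows.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stand-ins for the driver's enums. */
enum hw_qp_state { HW_RST, HW_INIT, HW_RTR, HW_RTS, HW_SQD, HW_SQER,
		   HW_ERR, HW_SQ_DRAINING };
enum ib_qp_state { IB_QPS_RESET, IB_QPS_INIT, IB_QPS_RTR, IB_QPS_RTS,
		   IB_QPS_SQD, IB_QPS_SQE, IB_QPS_ERR };

static int to_ib_qp_st(enum hw_qp_state state)
{
	static const enum ib_qp_state map[] = {
		[HW_RST] = IB_QPS_RESET,
		[HW_INIT] = IB_QPS_INIT,
		[HW_RTR] = IB_QPS_RTR,
		[HW_RTS] = IB_QPS_RTS,
		[HW_SQD] = IB_QPS_SQD,
		[HW_SQER] = IB_QPS_SQE,
		[HW_ERR] = IB_QPS_ERR,
		[HW_SQ_DRAINING] = IB_QPS_SQD,	/* both SQD-like states */
	};

	return ((unsigned int)state < ARRAY_SIZE(map)) ? map[state] : -1;
}

int main(void)
{
	printf("%d\n", to_ib_qp_st(HW_RTS));		   /* 3 == IB_QPS_RTS */
	printf("%d\n", to_ib_qp_st((enum hw_qp_state)42)); /* -1, out of range */
	return 0;
}

One caveat of this pattern: an in-range state accidentally left out of the
initializer list maps silently to 0 (IB_QPS_RESET) rather than to the old
default of -1; the patch covers every hardware state, so behavior is
unchanged.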
* Re: [PATCH for-next 3/6] RDMA/hns: Simplify the qp state convert code
From: Jason Gunthorpe @ 2020-04-14 12:56 UTC
To: Weihang Li; +Cc: dledford, leon, linux-rdma, linuxarm
On Mon, Apr 13, 2020 at 02:40:39PM +0800, Weihang Li wrote:
> From: Lang Cheng <chenglang@huawei.com>
>
> Use a type map table to reduce the cyclomatic complexity.
>
> Signed-off-by: Lang Cheng <chenglang@huawei.com>
> Signed-off-by: Weihang Li <liweihang@huawei.com>
> drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 27 ++++++++++++++-------------
> 1 file changed, 14 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> index 6816278..e938bd8 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> @@ -4540,19 +4540,20 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
> return ret;
> }
>
> -static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
> -{
> - switch (state) {
> - case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
> - case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
> - case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
> - case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
> - case HNS_ROCE_QP_ST_SQ_DRAINING:
> - case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
> - case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
> - case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
> - default: return -1;
> - }
> +static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
> +{
> + const enum ib_qp_state map[] = {
Should be static
Jason
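To make the review point concrete: without static, a const array local to a
function is an automatic variable, so the compiler may rebuild it on the
stack on every call; with static const it is emitted once in read-only data.
A trivial illustration with generic values, not driver code:

/* Rebuilt on the stack each time the function is entered. */
int lookup_auto(unsigned int i)
{
	const int map[] = { 10, 20, 30, 40 };

	return i < 4 ? map[i] : -1;
}

/* Emitted once in .rodata; no per-call initialization. */
int lookup_static(unsigned int i)
{
	static const int map[] = { 10, 20, 30, 40 };

	return i < 4 ? map[i] : -1;
}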
* Re: [PATCH for-next 3/6] RDMA/hns: Simplify the qp state convert code
From: liweihang @ 2020-04-15 1:15 UTC
To: Jason Gunthorpe
Cc: dledford@redhat.com, leon@kernel.org, linux-rdma@vger.kernel.org,
Linuxarm
On 2020/4/14 20:56, Jason Gunthorpe wrote:
> On Mon, Apr 13, 2020 at 02:40:39PM +0800, Weihang Li wrote:
>> From: Lang Cheng <chenglang@huawei.com>
>>
>> Use a type map table to reduce the cyclomatic complexity.
>>
>> Signed-off-by: Lang Cheng <chenglang@huawei.com>
>> Signed-off-by: Weihang Li <liweihang@huawei.com>
>> drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 27 ++++++++++++++-------------
>> 1 file changed, 14 insertions(+), 13 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> index 6816278..e938bd8 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> @@ -4540,19 +4540,20 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
>> return ret;
>> }
>>
>> -static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
>> -{
>> - switch (state) {
>> - case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
>> - case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
>> - case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
>> - case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
>> - case HNS_ROCE_QP_ST_SQ_DRAINING:
>> - case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
>> - case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
>> - case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
>> - default: return -1;
>> - }
>> +static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
>> +{
>> + const enum ib_qp_state map[] = {
>
> Should be static
>
> Jason
>
OK, thank you.
Weihang
* [PATCH for-next 4/6] RDMA/hns: Simplify the cqe code of poll cq
From: Weihang Li @ 2020-04-13 6:40 UTC
To: dledford, jgg; +Cc: leon, linux-rdma, linuxarm
From: Lang Cheng <chenglang@huawei.com>
Encapsulate the code that gets the status of a CQE into a function and use a
map table instead of switch-case to reduce the cyclomatic complexity of
hns_roce_v2_poll_one().
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
---
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 130 +++++++++++++----------------
1 file changed, 57 insertions(+), 73 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index e938bd8..c2d2c9e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2954,6 +2954,61 @@ static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
return npolled;
}
+static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ struct hns_roce_v2_cqe *cqe, struct ib_wc *wc)
+{
+ static struct {
+ u32 cqe_status;
+ enum ib_wc_status wc_status;
+ } map[] = {
+ { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
+ { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
+ { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
+ { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
+ { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
+ { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
+ IB_WC_RETRY_EXC_ERR },
+ { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
+ };
+
+ u32 cqe_status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
+ V2_CQE_BYTE_4_STATUS_S);
+ int i;
+
+ wc->status = IB_WC_GENERAL_ERR;
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ if (cqe_status == map[i].cqe_status) {
+ wc->status = map[i].wc_status;
+ break;
+ }
+
+ if (wc->status == IB_WC_SUCCESS || wc->status == IB_WC_WR_FLUSH_ERR)
+ return;
+
+ ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
+ sizeof(*cqe), false);
+
+ /*
+ * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
+ * into errored mode. Hence, as a workaround to this hardware
+ * limitation, driver needs to assist in flushing. But the flushing
+ * operation uses mailbox to convey the QP state to the hardware and
+ * which can sleep due to the mutex protection around the mailbox calls.
+ * Hence, use the deferred flush for now. Once wc error detected, the
+ * flushing operation is needed.
+ */
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(hr_dev, qp);
+}
+
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
@@ -2965,7 +3020,6 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
int is_send;
u16 wqe_ctr;
u32 opcode;
- u32 status;
int qpn;
int ret;
@@ -2995,7 +3049,6 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
*cur_qp = hr_qp;
}
- hr_qp = *cur_qp;
wc->qp = &(*cur_qp)->ibqp;
wc->vendor_err = 0;
@@ -3030,77 +3083,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
++wq->tail;
}
- status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
- V2_CQE_BYTE_4_STATUS_S);
- switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
- case HNS_ROCE_CQE_V2_SUCCESS:
- wc->status = IB_WC_SUCCESS;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
- wc->status = IB_WC_LOC_LEN_ERR;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
- wc->status = IB_WC_LOC_QP_OP_ERR;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
- wc->status = IB_WC_LOC_PROT_ERR;
- break;
- case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
- wc->status = IB_WC_WR_FLUSH_ERR;
- break;
- case HNS_ROCE_CQE_V2_MW_BIND_ERR:
- wc->status = IB_WC_MW_BIND_ERR;
- break;
- case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
- wc->status = IB_WC_BAD_RESP_ERR;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
- wc->status = IB_WC_REM_INV_REQ_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
- wc->status = IB_WC_REM_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
- wc->status = IB_WC_REM_OP_ERR;
- break;
- case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
- wc->status = IB_WC_RETRY_EXC_ERR;
- break;
- case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
- wc->status = IB_WC_RNR_RETRY_EXC_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
- wc->status = IB_WC_REM_ABORT_ERR;
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
-
- /*
- * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
- * into errored mode. Hence, as a workaround to this hardware
- * limitation, driver needs to assist in flushing. But the flushing
- * operation uses mailbox to convey the QP state to the hardware and
- * which can sleep due to the mutex protection around the mailbox calls.
- * Hence, use the deferred flush for now. Once wc error detected, the
- * flushing operation is needed.
- */
- if (wc->status != IB_WC_SUCCESS &&
- wc->status != IB_WC_WR_FLUSH_ERR) {
- ibdev_err(&hr_dev->ib_dev, "error cqe status is: 0x%x\n",
- status & HNS_ROCE_V2_CQE_STATUS_MASK);
-
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag))
- init_flush_work(hr_dev, hr_qp);
-
- return 0;
- }
-
- if (wc->status == IB_WC_WR_FLUSH_ERR)
+ get_cqe_status(hr_dev, *cur_qp, cqe, wc);
+ if (wc->status != IB_WC_SUCCESS)
return 0;
if (is_send) {
--
2.8.1
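Unlike the dense index table in patch 3, the CQE status here is treated as an
arbitrary hardware value, so the patch stores {cqe status, wc status} pairs
and scans them linearly, falling back to IB_WC_GENERAL_ERR in place of the
old switch's default. A standalone sketch of that shape; the status codes and
names below are invented for illustration, and the table is shown static
const as the review that follows requests:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum wc_status { WC_SUCCESS, WC_LOC_LEN_ERR, WC_WR_FLUSH_ERR, WC_GENERAL_ERR };

static enum wc_status to_wc_status(unsigned int cqe_status)
{
	static const struct {
		unsigned int cqe;	/* hypothetical hardware code */
		enum wc_status wc;
	} map[] = {
		{ 0x00, WC_SUCCESS },
		{ 0x01, WC_LOC_LEN_ERR },
		{ 0x05, WC_WR_FLUSH_ERR },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (map[i].cqe == cqe_status)
			return map[i].wc;

	return WC_GENERAL_ERR;		/* the old switch's default */
}

int main(void)
{
	printf("%d\n", to_wc_status(0x05));	/* 2 == WC_WR_FLUSH_ERR */
	printf("%d\n", to_wc_status(0xff));	/* 3 == WC_GENERAL_ERR  */
	return 0;
}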
* Re: [PATCH for-next 4/6] RDMA/hns: Simplify the cqe code of poll cq
From: Jason Gunthorpe @ 2020-04-14 12:56 UTC
To: Weihang Li; +Cc: dledford, leon, linux-rdma, linuxarm
On Mon, Apr 13, 2020 at 02:40:40PM +0800, Weihang Li wrote:
> From: Lang Cheng <chenglang@huawei.com>
>
> Encapsulate the code that gets the status of a CQE into a function and use a
> map table instead of switch-case to reduce the cyclomatic complexity of
> hns_roce_v2_poll_one().
>
> Signed-off-by: Lang Cheng <chenglang@huawei.com>
> Signed-off-by: Weihang Li <liweihang@huawei.com>
> drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 130 +++++++++++++----------------
> 1 file changed, 57 insertions(+), 73 deletions(-)
>
> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> index e938bd8..c2d2c9e 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> @@ -2954,6 +2954,61 @@ static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
> return npolled;
> }
>
> +static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
> + struct hns_roce_v2_cqe *cqe, struct ib_wc *wc)
> +{
> + static struct {
> + u32 cqe_status;
> + enum ib_wc_status wc_status;
> + } map[] = {
Also should be const
Jason
* Re: [PATCH for-next 4/6] RDMA/hns: Simplify the cqe code of poll cq
From: liweihang @ 2020-04-15 1:15 UTC
To: Jason Gunthorpe
Cc: dledford@redhat.com, leon@kernel.org, linux-rdma@vger.kernel.org,
Linuxarm
On 2020/4/14 20:56, Jason Gunthorpe wrote:
> On Mon, Apr 13, 2020 at 02:40:40PM +0800, Weihang Li wrote:
>> From: Lang Cheng <chenglang@huawei.com>
>>
>> Encapsulate the code that gets the status of a CQE into a function and use a
>> map table instead of switch-case to reduce the cyclomatic complexity of
>> hns_roce_v2_poll_one().
>>
>> Signed-off-by: Lang Cheng <chenglang@huawei.com>
>> Signed-off-by: Weihang Li <liweihang@huawei.com>
>> drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 130 +++++++++++++----------------
>> 1 file changed, 57 insertions(+), 73 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> index e938bd8..c2d2c9e 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> @@ -2954,6 +2954,61 @@ static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
>> return npolled;
>> }
>>
>> +static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
>> + struct hns_roce_v2_cqe *cqe, struct ib_wc *wc)
>> +{
>> + static struct {
>> + u32 cqe_status;
>> + enum ib_wc_status wc_status;
>> + } map[] = {
>
> Also should be const
>
> Jason
>
I see, thanks for your reminder.
Weihang
* [PATCH for-next 5/6] RDMA/hns: Simplify the state judgment code of qp
From: Weihang Li @ 2020-04-13 6:40 UTC
To: dledford, jgg; +Cc: leon, linux-rdma, linuxarm
From: Lang Cheng <chenglang@huawei.com>
Use a state table to make the QP state migration code more readable.
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
---
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 54 +++++++++++++++---------------
1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index c2d2c9e..7949591 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -4078,21 +4078,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
return 0;
}
-static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
-
- if ((cur_state != IB_QPS_RESET &&
- (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
- ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
- (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
- (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
- return true;
-
- return false;
-
-}
-
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4196,6 +4181,28 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
return 0;
}
+static bool check_qp_state(enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ bool sm[][IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true },
+ [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true,
+ [IB_QPS_RTR] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_RTS] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
+ [IB_QPS_SQD] = {},
+ [IB_QPS_SQE] = {},
+ [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
+ };
+
+ return sm[cur_state][new_state];
+}
+
static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4207,6 +4214,11 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
int ret = 0;
+ if (!check_qp_state(cur_state, new_state)) {
+ ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
+ return -EINVAL;
+ }
+
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, sizeof(*qpc_mask));
modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
@@ -4217,23 +4229,11 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
- if (ret)
- goto out;
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
- if (ret)
- goto out;
- } else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
- /* Nothing */
- ;
- } else {
- ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
- ret = -EINVAL;
- goto out;
}
-out:
return ret;
}
--
2.8.1
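The check_qp_state() helper is a two-dimensional validity matrix: the row is
the current state, the column the requested state, and any pair not listed in
the initializer defaults to false, i.e. the transition is rejected. Patch 6
below duplicates the same table for the v1 hardware. A standalone sketch with
a stand-in enum, shown static const to match the review comments on the
earlier patches:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for enum ib_qp_state; ERR must stay the highest value,
 * since it sizes the table. */
enum qp_state { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS,
		QPS_SQD, QPS_SQE, QPS_ERR };

static bool check_qp_state(enum qp_state cur, enum qp_state next)
{
	static const bool sm[QPS_ERR + 1][QPS_ERR + 1] = {
		[QPS_RESET] = { [QPS_RESET] = true, [QPS_INIT] = true },
		[QPS_INIT]  = { [QPS_RESET] = true, [QPS_INIT] = true,
				[QPS_RTR] = true, [QPS_ERR] = true },
		[QPS_RTR]   = { [QPS_RESET] = true, [QPS_RTS] = true,
				[QPS_ERR] = true },
		[QPS_RTS]   = { [QPS_RESET] = true, [QPS_ERR] = true },
		/* SQD and SQE rows stay all false: nothing leaves them */
		[QPS_ERR]   = { [QPS_RESET] = true, [QPS_ERR] = true },
	};

	return sm[cur][next];
}

int main(void)
{
	printf("%d\n", check_qp_state(QPS_INIT, QPS_RTR));	/* 1: allowed  */
	printf("%d\n", check_qp_state(QPS_SQD, QPS_RTS));	/* 0: rejected */
	return 0;
}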
* [PATCH for-next 6/6] RDMA/hns: Simplify the status judgment code of hns_roce_v1_m_qp()
From: Weihang Li @ 2020-04-13 6:40 UTC
To: dledford, jgg; +Cc: leon, linux-rdma, linuxarm
From: Lang Cheng <chenglang@huawei.com>
Use a status table to reduce the cyclomatic complexity.
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
---
drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 42 +++++++++++++++++++++---------
1 file changed, 30 insertions(+), 12 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 5ff028d..697f94b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -2704,6 +2704,28 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
return -EINVAL;
}
+static bool check_qp_state(enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ bool sm[][IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true },
+ [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true,
+ [IB_QPS_RTR] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_RTS] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
+ [IB_QPS_SQD] = {},
+ [IB_QPS_SQE] = {},
+ [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
+ };
+
+ return sm[cur_state][new_state];
+}
+
static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state)
@@ -2725,6 +2747,13 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
u8 *dmac;
u8 *smac;
+ if (!check_qp_state(cur_state, new_state)) {
+ ibdev_err(ibqp->device,
+ "not support QP(%u) status from %d to %d\n",
+ ibqp->qp_num, cur_state, new_state);
+ return -EINVAL;
+ }
+
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
@@ -3062,8 +3091,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_156_SL_S,
rdma_ah_get_sl(&attr->ah_attr));
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
- } else if (cur_state == IB_QPS_RTR &&
- new_state == IB_QPS_RTS) {
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
/* If exist optional param, return error */
if ((attr_mask & IB_QP_ALT_PATH) ||
(attr_mask & IB_QP_ACCESS_FLAGS) ||
@@ -3235,16 +3263,6 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
0);
- } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
- dev_err(dev, "not support this status migration\n");
- goto out;
}
/* Every status migrate must change state */
--
2.8.1