From: Weihang Li <liweihang@huawei.com>
To: <dledford@redhat.com>, <jgg@ziepe.ca>
Cc: <leon@kernel.org>, <linux-rdma@vger.kernel.org>, <linuxarm@huawei.com>
Subject: [PATCH v2 for-next 1/7] RDMA/hns: Optimize qp destroy flow
Date: Mon, 10 Feb 2020 17:08:34 +0800
Message-ID: <1581325720-12975-2-git-send-email-liweihang@huawei.com>
In-Reply-To: <1581325720-12975-1-git-send-email-liweihang@huawei.com>
From: Xi Wang <wangxi11@huawei.com>
Wrap the duplicate code in the hip08 and hip06 qp destruction processes as
hns_roce_qp_destroy() to simplify the qp destroy flow.
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
---
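Notes:

With this change the duplicated tail of the hip06 and hip08 destroy paths
collapses into a single call. Roughly, for hip08 (a sketch condensed from the
hunks below, not a verbatim copy; hardware reset, CQ cleanup and locking stay
in the hw-specific code):

	ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
	if (ret)
		ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
			  hr_qp->qpn, ret);

	/* common software-resource teardown shared with hip06: releases the
	 * QPN (non-GSI only), the mtr, user doorbells or kernel wrid/buf/db,
	 * the umem and the rq inline buffers, then frees hr_qp itself
	 */
	hns_roce_qp_destroy(hr_dev, hr_qp, udata);

	return 0;
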
drivers/infiniband/hw/hns/hns_roce_device.h | 5 ++--
drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 19 ++-----------
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 39 +--------------------------
drivers/infiniband/hw/hns/hns_roce_qp.c | 41 +++++++++++++++++++++++++++++
4 files changed, 46 insertions(+), 58 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index a7c4ff9..1f361e6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1248,9 +1248,8 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
 void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
 void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
-void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
-void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
-			       int cnt);
+void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+			 struct ib_udata *udata);
 
 __be32 send_ieth(const struct ib_send_wr *wr);
 int to_hr_qp_type(int qp_type);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index c6e6658..fe37e6f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -3623,26 +3623,11 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 		if (send_cq && send_cq != recv_cq)
 			__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
 	}
-	hns_roce_unlock_cqs(send_cq, recv_cq);
-
 	hns_roce_qp_remove(hr_dev, hr_qp);
-	hns_roce_qp_free(hr_dev, hr_qp);
-
-	/* RC QP, release QPN */
-	if (hr_qp->ibqp.qp_type == IB_QPT_RC)
-		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
-
-	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
-
-	ib_umem_release(hr_qp->umem);
-	if (!udata) {
-		kfree(hr_qp->sq.wrid);
-		kfree(hr_qp->rq.wrid);
+	hns_roce_unlock_cqs(send_cq, recv_cq);
 
-		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
-	}
+	hns_roce_qp_destroy(hr_dev, hr_qp, udata);
 
-	kfree(hr_qp);
 	return 0;
 }
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 12c4cd8..5dbff86 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5032,43 +5032,6 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
 	hns_roce_unlock_cqs(send_cq, recv_cq);
 	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
 
-	hns_roce_qp_free(hr_dev, hr_qp);
-
-	/* Not special_QP, free their QPN */
-	if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
-	    (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
-	    (hr_qp->ibqp.qp_type == IB_QPT_UD))
-		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
-
-	hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
-
-	if (udata) {
-		struct hns_roce_ucontext *context =
-			rdma_udata_to_drv_context(
-				udata,
-				struct hns_roce_ucontext,
-				ibucontext);
-
-		if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
-			hns_roce_db_unmap_user(context, &hr_qp->sdb);
-
-		if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
-			hns_roce_db_unmap_user(context, &hr_qp->rdb);
-	} else {
-		kfree(hr_qp->sq.wrid);
-		kfree(hr_qp->rq.wrid);
-		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
-		if (hr_qp->rq.wqe_cnt)
-			hns_roce_free_db(hr_dev, &hr_qp->rdb);
-	}
-	ib_umem_release(hr_qp->umem);
-
-	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
-	    hr_qp->rq.wqe_cnt) {
-		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
-		kfree(hr_qp->rq_inl_buf.wqe_list);
-	}
-
 	return ret;
 }
@@ -5083,7 +5046,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 		ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
 			  hr_qp->qpn, ret);
 
-	kfree(hr_qp);
+	hns_roce_qp_destroy(hr_dev, hr_qp, udata);
 
 	return 0;
 }
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 7c8de1e..748a867 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1035,6 +1035,47 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
+void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+			 struct ib_udata *udata)
+{
+	hns_roce_qp_free(hr_dev, hr_qp);
+
+	/* Not special_QP, free their QPN */
+	if (hr_qp->ibqp.qp_type != IB_QPT_GSI)
+		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+
+	hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
+
+	if (udata) {
+		struct hns_roce_ucontext *context =
+			rdma_udata_to_drv_context(
+				udata,
+				struct hns_roce_ucontext,
+				ibucontext);
+
+		if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
+			hns_roce_db_unmap_user(context, &hr_qp->sdb);
+
+		if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
+			hns_roce_db_unmap_user(context, &hr_qp->rdb);
+	} else {
+		kfree(hr_qp->sq.wrid);
+		kfree(hr_qp->rq.wrid);
+		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+		if (hr_qp->rq.wqe_cnt)
+			hns_roce_free_db(hr_dev, &hr_qp->rdb);
+	}
+	ib_umem_release(hr_qp->umem);
+
+	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+	    hr_qp->rq.wqe_cnt) {
+		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+		kfree(hr_qp->rq_inl_buf.wqe_list);
+	}
+
+	kfree(hr_qp);
+}
+
 struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
--
2.8.1