From: Leon Romanovsky <leon@kernel.org>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@mellanox.com>
Cc: Leon Romanovsky <leonro@mellanox.com>,
	linux-rdma@vger.kernel.org, Maor Gottlieb <maorg@mellanox.com>
Subject: [PATCH rdma-next 10/18] RDMA/mlx5: Separate to user/kernel create QP flows
Date: Thu, 23 Apr 2020 22:02:55 +0300	[thread overview]
Message-ID: <20200423190303.12856-11-leon@kernel.org> (raw)
In-Reply-To: <20200423190303.12856-1-leon@kernel.org>

From: Leon Romanovsky <leonro@mellanox.com>

The kernel and user create QP flows have very little common code, so
separate them to simplify the future work of creating per-type
create_*_qp() functions.

Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
 drivers/infiniband/hw/mlx5/qp.c | 205 ++++++++++++++++++++++++--------
 1 file changed, 156 insertions(+), 49 deletions(-)
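
Note: for readers skimming the series, a minimal sketch of the dispatch
this patch ends up with in mlx5_ib_create_qp() (names taken from the
diff below; argument lists and error handling abbreviated):

	if (udata)
		/* user flow: validate ucmd against the computed RQ/SQ
		 * sizes, then build the QP via _create_user_qp()
		 */
		err = create_user_qp(dev, pd, init_attr, ucmd, udata,
				     qp, uidx);
	else
		/* kernel flow: no udata/ucmd; build the QP via
		 * _create_kernel_qp()
		 */
		err = create_kernel_qp(dev, pd, init_attr, qp, uidx);

create_qp_common() is renamed to create_user_qp(), and the kernel-only
qpc setup (the REG_UMR latency_sensitive bit and the IPoIB UD LSO
offload flag) moves into the new create_kernel_qp().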

diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index e00a51d3e17e..f667abf3f461 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -911,12 +911,12 @@ static int adjust_bfregn(struct mlx5_ib_dev *dev,
 				bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
 }
 
-static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
-			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
-			  struct ib_qp_init_attr *attr, u32 **in,
-			  struct mlx5_ib_create_qp_resp *resp, int *inlen,
-			  struct mlx5_ib_qp_base *base,
-			  struct mlx5_ib_create_qp *ucmd)
+static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+			   struct mlx5_ib_qp *qp, struct ib_udata *udata,
+			   struct ib_qp_init_attr *attr, u32 **in,
+			   struct mlx5_ib_create_qp_resp *resp, int *inlen,
+			   struct mlx5_ib_qp_base *base,
+			   struct mlx5_ib_create_qp *ucmd)
 {
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
@@ -1083,11 +1083,10 @@ static void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
 	return fragment_end + MLX5_SEND_WQE_BB;
 }
 
-static int create_kernel_qp(struct mlx5_ib_dev *dev,
-			    struct ib_qp_init_attr *init_attr,
-			    struct mlx5_ib_qp *qp,
-			    u32 **in, int *inlen,
-			    struct mlx5_ib_qp_base *base)
+static int _create_kernel_qp(struct mlx5_ib_dev *dev,
+			     struct ib_qp_init_attr *init_attr,
+			     struct mlx5_ib_qp *qp, u32 **in, int *inlen,
+			     struct mlx5_ib_qp_base *base)
 {
 	int uar_index;
 	void *qpc;
@@ -1987,11 +1986,11 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev,
 	return 0;
 }
 
-static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
-			    struct ib_qp_init_attr *init_attr,
-			    struct mlx5_ib_create_qp *ucmd,
-			    struct ib_udata *udata, struct mlx5_ib_qp *qp,
-			    u32 uidx)
+static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+			  struct ib_qp_init_attr *init_attr,
+			  struct mlx5_ib_create_qp *ucmd,
+			  struct ib_udata *udata, struct mlx5_ib_qp *qp,
+			  u32 uidx)
 {
 	struct mlx5_ib_resources *devr = &dev->devr;
 	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
@@ -2032,28 +2031,15 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		return err;
 	}
 
-	if (udata) {
-		__u32 max_wqes = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+	if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
+	    ucmd->rq_wqe_count != qp->rq.wqe_cnt)
+		return -EINVAL;
 
-		mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n",
-			    ucmd->sq_wqe_count);
-		if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
-		    ucmd->rq_wqe_count != qp->rq.wqe_cnt) {
-			mlx5_ib_dbg(dev, "invalid rq params\n");
-			return -EINVAL;
-		}
-		if (ucmd->sq_wqe_count > max_wqes) {
-			mlx5_ib_dbg(
-				dev,
-				"requested sq_wqe_count (%d) > max allowed (%d)\n",
-				ucmd->sq_wqe_count, max_wqes);
-			return -EINVAL;
-		}
-		err = create_user_qp(dev, pd, qp, udata, init_attr, &in, &resp,
-				     &inlen, base, ucmd);
-	} else
-		err = create_kernel_qp(dev, init_attr, qp, &in, &inlen, base);
+	if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
+		return -EINVAL;
 
+	err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &resp, &inlen,
+			      base, ucmd);
 	if (err)
 		return err;
 
@@ -2064,12 +2050,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
 	MLX5_SET(qpc, qpc, st, mlx5_st);
 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
-
-	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
-		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
-	else
-		MLX5_SET(qpc, qpc, latency_sensitive, 1);
-
+	MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
 
 	if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
 		MLX5_SET(qpc, qpc, wq_signature, 1);
@@ -2145,10 +2126,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
 		MLX5_SET(qpc, qpc, user_index, uidx);
 
-	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
-	if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
-		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
-
 	if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING &&
 	    init_attr->qp_type != IB_QPT_RAW_PACKET) {
 		MLX5_SET(qpc, qpc, end_padding_mode,
@@ -2200,6 +2177,133 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	return err;
 }
 
+static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+			    struct ib_qp_init_attr *attr, struct mlx5_ib_qp *qp,
+			    u32 uidx)
+{
+	struct mlx5_ib_resources *devr = &dev->devr;
+	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+	struct mlx5_core_dev *mdev = dev->mdev;
+	struct mlx5_ib_cq *send_cq;
+	struct mlx5_ib_cq *recv_cq;
+	unsigned long flags;
+	struct mlx5_ib_qp_base *base;
+	int mlx5_st;
+	void *qpc;
+	u32 *in;
+	int err;
+
+	mutex_init(&qp->mutex);
+	spin_lock_init(&qp->sq.lock);
+	spin_lock_init(&qp->rq.lock);
+
+	mlx5_st = to_mlx5_st(qp->type);
+	if (mlx5_st < 0)
+		return -EINVAL;
+
+	if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+	base = &qp->trans_qp.base;
+
+	qp->has_rq = qp_has_rq(attr);
+	err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL);
+	if (err) {
+		mlx5_ib_dbg(dev, "err %d\n", err);
+		return err;
+	}
+
+	err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base);
+	if (err)
+		return err;
+
+	if (is_sqp(attr->qp_type))
+		qp->port = attr->port_num;
+
+	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+	MLX5_SET(qpc, qpc, st, mlx5_st);
+	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+
+	if (attr->qp_type != MLX5_IB_QPT_REG_UMR)
+		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
+	else
+		MLX5_SET(qpc, qpc, latency_sensitive, 1);
+
+
+	if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+		MLX5_SET(qpc, qpc, block_lb_mc, 1);
+
+	if (qp->rq.wqe_cnt) {
+		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
+	}
+
+	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr));
+
+	if (qp->sq.wqe_cnt)
+		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+	else
+		MLX5_SET(qpc, qpc, no_sq, 1);
+
+	if (attr->srq) {
+		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
+		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+			 to_msrq(attr->srq)->msrq.srqn);
+	} else {
+		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+			 to_msrq(devr->s1)->msrq.srqn);
+	}
+
+	if (attr->send_cq)
+		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn);
+
+	if (attr->recv_cq)
+		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn);
+
+	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+	/* 0xffffff means we ask to work with cqe version 0 */
+	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+		MLX5_SET(qpc, qpc, user_index, uidx);
+
+	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
+	if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
+		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
+
+	err = mlx5_core_create_qp(dev, &base->mqp, in, inlen);
+	kvfree(in);
+	if (err)
+		goto err_create;
+
+	base->container_mibqp = qp;
+	base->mqp.event = mlx5_ib_qp_event;
+
+	get_cqs(qp->type, attr->send_cq, attr->recv_cq,
+		&send_cq, &recv_cq);
+	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+	mlx5_ib_lock_cqs(send_cq, recv_cq);
+	/* Maintain device to QPs access, needed for further handling via reset
+	 * flow
+	 */
+	list_add_tail(&qp->qps_list, &dev->qp_list);
+	/* Maintain CQ to QPs access, needed for further handling via reset flow
+	 */
+	if (send_cq)
+		list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+	if (recv_cq)
+		list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+	mlx5_ib_unlock_cqs(send_cq, recv_cq);
+	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+	return 0;
+
+err_create:
+	destroy_qp_kernel(dev, qp);
+	return err;
+}
+
 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
 	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
@@ -2695,7 +2799,7 @@ static int create_raw_qp(struct ib_pd *pd, struct mlx5_ib_qp *qp,
 	if (attr->rwq_ind_tbl)
 		return create_rss_raw_qp_tir(pd, qp, attr, ucmd, udata);
 
-	return create_qp_common(dev, pd, attr, ucmd, udata, qp, uidx);
+	return create_user_qp(dev, pd, attr, ucmd, udata, qp, uidx);
 }
 
 static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
@@ -2819,8 +2923,11 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 		err = create_xrc_tgt_qp(dev, init_attr, qp, udata, uidx);
 		break;
 	default:
-		err = create_qp_common(dev, pd, init_attr, ucmd, udata, qp,
-				       uidx);
+		if (udata)
+			err = create_user_qp(dev, pd, init_attr, ucmd, udata,
+					     qp, uidx);
+		else
+			err = create_kernel_qp(dev, pd, init_attr, qp, uidx);
 	}
 	if (err) {
 		mlx5_ib_dbg(dev, "create_qp failed %d\n", err);
-- 
2.25.3


Thread overview: 19+ messages
2020-04-23 19:02 [PATCH rdma-next 00/18] Refactor mlx5_ib_create_qp (Part II) Leon Romanovsky
2020-04-23 19:02 ` [PATCH rdma-next 01/18] RDMA/mlx5: Delete unsupported QP types Leon Romanovsky
2020-04-23 19:02 ` [PATCH rdma-next 02/18] RDMA/mlx5: Store QP type in the vendor QP structure Leon Romanovsky
2020-04-23 19:02 ` [PATCH rdma-next 03/18] RDMA/mlx5: Promote RSS RAW QP attribute check in higher level Leon Romanovsky
2020-04-23 19:02 ` [PATCH rdma-next 04/18] RDMA/mlx5: Combine copy of create QP command in RSS RAW QP Leon Romanovsky
2020-04-23 19:02 ` [PATCH rdma-next 05/18] RDMA/mlx5: Remove second user copy in create_user_qp Leon Romanovsky
2020-04-23 19:02 ` [PATCH rdma-next 06/18] RDMA/mlx5: Rely on existence of udata to separate kernel/user flows Leon Romanovsky
2020-04-23 19:02 ` [PATCH rdma-next 07/18] RDMA/mlx5: Delete impossible inlen check Leon Romanovsky
2020-04-23 19:02 ` [PATCH rdma-next 08/18] RDMA/mlx5: Globally parse DEVX UID Leon Romanovsky
2020-04-23 19:02 ` [PATCH rdma-next 09/18] RDMA/mlx5: Separate XRC_TGT QP creation from common flow Leon Romanovsky
2020-04-23 19:02 ` Leon Romanovsky [this message]
2020-04-23 19:02 ` [PATCH rdma-next 11/18] RDMA/mlx5: Reduce amount of duplication in QP destroy Leon Romanovsky
2020-04-23 19:02 ` [PATCH rdma-next 12/18] RDMA/mlx5: Group all create QP parameters to simplify in-kernel interfaces Leon Romanovsky
2020-04-23 19:02 ` [PATCH rdma-next 13/18] RDMA/mlx5: Promote RSS RAW QP flags check to higher level Leon Romanovsky
2020-04-23 19:02 ` [PATCH rdma-next 14/18] RDMA/mlx5: Handle udate outlen checks in one place Leon Romanovsky
2020-04-23 19:03 ` [PATCH rdma-next 15/18] RDMA/mlx5: Copy response to the user " Leon Romanovsky
2020-04-23 19:03 ` [PATCH rdma-next 16/18] RDMA/mlx5: Remove redundant destroy QP call Leon Romanovsky
2020-04-23 19:03 ` [PATCH rdma-next 17/18] RDMA/mlx5: Consolidate into special function all create QP calls Leon Romanovsky
2020-04-23 19:03 ` [PATCH rdma-next 18/18] RDMA/mlx5: Verify that QP is created with RQ or SQ Leon Romanovsky
