From: Leon Romanovsky <leon@kernel.org>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@mellanox.com>
Cc: Leon Romanovsky <leonro@mellanox.com>,
RDMA mailing list <linux-rdma@vger.kernel.org>,
Aharon Landau <aharonl@mellanox.com>,
Eli Cohen <eli@mellanox.com>, Maor Gottlieb <maorg@mellanox.com>
Subject: [PATCH rdma-next v1 27/36] RDMA/mlx5: Separate XRC_TGT QP creation from common flow
Date: Mon, 27 Apr 2020 18:46:27 +0300 [thread overview]
Message-ID: <20200427154636.381474-28-leon@kernel.org> (raw)
In-Reply-To: <20200427154636.381474-1-leon@kernel.org>
From: Leon Romanovsky <leonro@mellanox.com>
XRC_TGT QP doesn't fall into the kernel or user flow separation. It is
initiated by the user, but is created through the in-kernel verbs flow
and doesn't have a PD or udata, in a similar way to kernel QPs.
So let's separate the creation of that QP type from the common flow.
Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
drivers/infiniband/hw/mlx5/qp.c | 158 +++++++++++++++++++++-----------
1 file changed, 106 insertions(+), 52 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index b2174e0817f5..8890c172f7e5 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -991,8 +991,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto err_umem;
}
- uid = (attr->qp_type != IB_QPT_XRC_TGT &&
- attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
+ uid = (attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
MLX5_SET(create_qp_in, *in, uid, uid);
pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
if (ubuffer->umem)
@@ -1913,6 +1912,81 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
return atomic_mode;
}
+static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *attr,
+ struct mlx5_ib_qp *qp, struct ib_udata *udata,
+ u32 uidx)
+{
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_qp_base *base;
+ unsigned long flags;
+ void *qpc;
+ u32 *in;
+ int err;
+
+ mutex_init(&qp->mutex);
+
+ if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(devr->p0)->pdn);
+
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ MLX5_SET(qpc, qpc, cd_master, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
+ MLX5_SET(qpc, qpc, cd_slave_receive, 1);
+
+ MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(attr->xrcd)->xrcdn);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ /* Special case to clean flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
+ }
+
+ base = &qp->trans_qp.base;
+ err = mlx5_core_create_qp(dev, &base->mqp, in, inlen);
+ kvfree(in);
+ if (err) {
+ destroy_qp_user(dev, NULL, qp, base, udata);
+ return err;
+ }
+
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ return 0;
+}
+
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct mlx5_ib_create_qp *ucmd,
@@ -1958,40 +2032,30 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return err;
}
- if (pd) {
- if (udata) {
- __u32 max_wqes =
- 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
- mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n",
- ucmd->sq_wqe_count);
- if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
- ucmd->rq_wqe_count != qp->rq.wqe_cnt) {
- mlx5_ib_dbg(dev, "invalid rq params\n");
- return -EINVAL;
- }
- if (ucmd->sq_wqe_count > max_wqes) {
- mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
- ucmd->sq_wqe_count, max_wqes);
- return -EINVAL;
- }
- err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
- &resp, &inlen, base, ucmd);
- if (err)
- mlx5_ib_dbg(dev, "err %d\n", err);
- } else {
- err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
- base);
- if (err)
- mlx5_ib_dbg(dev, "err %d\n", err);
+ if (udata) {
+ __u32 max_wqes = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+
+ mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n",
+ ucmd->sq_wqe_count);
+ if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
+ ucmd->rq_wqe_count != qp->rq.wqe_cnt) {
+ mlx5_ib_dbg(dev, "invalid rq params\n");
+ return -EINVAL;
+ }
+ if (ucmd->sq_wqe_count > max_wqes) {
+ mlx5_ib_dbg(
+ dev,
+ "requested sq_wqe_count (%d) > max allowed (%d)\n",
+ ucmd->sq_wqe_count, max_wqes);
+ return -EINVAL;
}
+ err = create_user_qp(dev, pd, qp, udata, init_attr, &in, &resp,
+ &inlen, base, ucmd);
+ } else
+ err = create_kernel_qp(dev, init_attr, qp, &in, &inlen, base);
- if (err)
- return err;
- } else {
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
- }
+ if (err)
+ return err;
if (is_sqp(init_attr->qp_type))
qp->port = init_attr->port_num;
@@ -2054,12 +2118,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
/* Set default resources */
switch (init_attr->qp_type) {
- case IB_QPT_XRC_TGT:
- MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
- MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
- MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
- MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
- break;
case IB_QPT_XRC_INI:
MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
@@ -2105,16 +2163,12 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
&resp);
- } else {
+ } else
err = mlx5_core_create_qp(dev, &base->mqp, in, inlen);
- }
-
- if (err) {
- mlx5_ib_dbg(dev, "create qp failed\n");
- goto err_create;
- }
kvfree(in);
+ if (err)
+ goto err_create;
base->container_mibqp = qp;
base->mqp.event = mlx5_ib_qp_event;
@@ -2143,7 +2197,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
destroy_qp_user(dev, pd, qp, base, udata);
else
destroy_qp_kernel(dev, qp);
- kvfree(in);
return err;
}
@@ -2750,9 +2803,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
if (err)
goto free_qp;
- if (qp->type == IB_QPT_XRC_TGT)
- xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
-
err = check_qp_attr(dev, qp, init_attr);
if (err)
goto free_qp;
@@ -2764,12 +2814,16 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
case MLX5_IB_QPT_DCT:
err = create_dct(pd, qp, init_attr, ucmd, uidx);
break;
+ case IB_QPT_XRC_TGT:
+ xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
+ err = create_xrc_tgt_qp(dev, init_attr, qp, udata, uidx);
+ break;
default:
err = create_qp_common(dev, pd, init_attr, ucmd, udata, qp,
uidx);
}
if (err) {
- mlx5_ib_dbg(dev, "create_qp_common failed\n");
+ mlx5_ib_dbg(dev, "create_qp failed %d\n", err);
goto free_qp;
}
--
2.25.3
next prev parent reply other threads:[~2020-04-27 15:48 UTC|newest]
Thread overview: 39+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-04-27 15:46 [PATCH rdma-next v1 00/36] Refactor mlx5_ib_create_qp Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 01/36] RDMA/mlx5: Organize QP types checks in one place Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 02/36] RDMA/mlx5: Delete impossible GSI port check Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 03/36] RDMA/mlx5: Perform check if QP creation flow is valid Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 04/36] RDMA/mlx5: Prepare QP allocation for future removal Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 05/36] RDMA/mlx5: Avoid setting redundant NULL for XRC QPs Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 06/36] RDMA/mlx5: Set QP subtype immediately when it is known Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 07/36] RDMA/mlx5: Separate create QP flows to be based on type Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 08/36] RDMA/mlx5: Split scatter CQE configuration for DCT QP Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 09/36] RDMA/mlx5: Update all DRIVER QP places to use QP subtype Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 10/36] RDMA/mlx5: Move DRIVER QP flags check into separate function Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 11/36] RDMA/mlx5: Remove second copy from user for non RSS RAW QPs Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 12/36] RDMA/mlx5: Initial separation of RAW_PACKET QP from common flow Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 13/36] RDMA/mlx5: Delete create QP flags obfuscation Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 14/36] RDMA/mlx5: Process create QP flags in one place Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 15/36] RDMA/mlx5: Use flags_en mechanism to mark QP created with WQE signature Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 16/36] RDMA/mlx5: Change scatter CQE flag to be set like other vendor flags Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 17/36] RDMA/mlx5: Return all configured create flags through query QP Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 18/36] RDMA/mlx5: Process all vendor flags in one place Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 19/36] RDMA/mlx5: Delete unsupported QP types Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 20/36] RDMA/mlx5: Store QP type in the vendor QP structure Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 21/36] RDMA/mlx5: Promote RSS RAW QP attribute check in higher level Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 22/36] RDMA/mlx5: Combine copy of create QP command in RSS RAW QP Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 23/36] RDMA/mlx5: Remove second user copy in create_user_qp Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 24/36] RDMA/mlx5: Rely on existence of udata to separate kernel/user flows Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 25/36] RDMA/mlx5: Delete impossible inlen check Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 26/36] RDMA/mlx5: Globally parse DEVX UID Leon Romanovsky
2020-04-27 15:46 ` Leon Romanovsky [this message]
2020-04-27 15:46 ` [PATCH rdma-next v1 28/36] RDMA/mlx5: Separate to user/kernel create QP flows Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 29/36] RDMA/mlx5: Reduce amount of duplication in QP destroy Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 30/36] RDMA/mlx5: Group all create QP parameters to simplify in-kernel interfaces Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 31/36] RDMA/mlx5: Promote RSS RAW QP flags check to higher level Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 32/36] RDMA/mlx5: Handle udate outlen checks in one place Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 33/36] RDMA/mlx5: Copy response to the user " Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 34/36] RDMA/mlx5: Remove redundant destroy QP call Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 35/36] RDMA/mlx5: Consolidate into special function all create QP calls Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 36/36] RDMA/mlx5: Verify that QP is created with RQ or SQ Leon Romanovsky
2020-04-29 0:52 ` [PATCH rdma-next v1 00/36] Refactor mlx5_ib_create_qp Jason Gunthorpe
2020-04-30 23:13 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200427154636.381474-28-leon@kernel.org \
--to=leon@kernel.org \
--cc=aharonl@mellanox.com \
--cc=dledford@redhat.com \
--cc=eli@mellanox.com \
--cc=jgg@mellanox.com \
--cc=leonro@mellanox.com \
--cc=linux-rdma@vger.kernel.org \
--cc=maorg@mellanox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).