From: Leon Romanovsky <leon@kernel.org>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@mellanox.com>
Cc: Leon Romanovsky <leonro@mellanox.com>,
	RDMA mailing list <linux-rdma@vger.kernel.org>,
	Aharon Landau <aharonl@mellanox.com>,
	Eli Cohen <eli@mellanox.com>, Maor Gottlieb <maorg@mellanox.com>
Subject: [PATCH rdma-next v1 18/36] RDMA/mlx5: Process all vendor flags in one place
Date: Mon, 27 Apr 2020 18:46:18 +0300
Message-ID: <20200427154636.381474-19-leon@kernel.org>
In-Reply-To: <20200427154636.381474-1-leon@kernel.org>

From: Leon Romanovsky <leonro@mellanox.com>

Move processing of the vendor QP creation flags provided through ucmd
into one function, process_vendor_flags(), and check that only flags
supported by the device were set; any leftover flag fails QP creation
with -EINVAL.
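
The idiom the patch introduces is "claim and clear": each recognized
flag is moved from the request mask into qp->flags_en when its device
condition holds, and any bit still set at the end fails the request.
A minimal standalone sketch of the idiom, using hypothetical flag
names rather than the real mlx5 uAPI:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical flags, for illustration only. */
#define QP_FLAG_SIGNATURE	(1 << 0)
#define QP_FLAG_SCATTER_CQE	(1 << 1)
#define QP_FLAG_TUNNEL		(1 << 2)

struct qp {
	uint32_t flags_en;
};

/*
 * Claim one flag: if it is set and the device condition holds,
 * record it in qp->flags_en and clear it from the request mask.
 * Otherwise leave the bit set so the caller rejects the request.
 */
static void process_flag(uint32_t *flags, uint32_t flag, bool cond,
			 struct qp *qp)
{
	if (!(*flags & flag))
		return;
	if (cond) {
		qp->flags_en |= flag;
		*flags &= ~flag;
	}
}

static int process_flags(uint32_t req, struct qp *qp, bool tunnel_caps)
{
	uint32_t flags = req;

	process_flag(&flags, QP_FLAG_SIGNATURE, true, qp);
	process_flag(&flags, QP_FLAG_TUNNEL, tunnel_caps, qp);

	/* Any bit left over is unknown or unsupported. */
	return flags ? -EINVAL : 0;
}

int main(void)
{
	struct qp qp = { 0 };

	printf("%d\n", process_flags(QP_FLAG_SIGNATURE, &qp, false)); /* 0 */
	printf("%d\n", process_flags(QP_FLAG_TUNNEL, &qp, false)); /* -EINVAL */
	return 0;
}

The real process_vendor_flags() below additionally gates the
RAW_PACKET-only and RC-only flags on attr->qp_type before doing the
leftover-bits check.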

Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
 drivers/infiniband/hw/mlx5/qp.c | 156 +++++++++++++++-----------------
 1 file changed, 71 insertions(+), 85 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index f0385965a694..2673678f1899 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1430,13 +1430,6 @@ static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
 }
 
-static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
-{
-	return  (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
-		 MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
-		 MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
-}
-
 static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 				      struct mlx5_ib_rq *rq,
 				      u32 qp_flags_en,
@@ -1693,27 +1686,20 @@ static int create_rss_raw_qp_tir(struct ib_pd *pd, struct mlx5_ib_qp *qp,
 		return -EOPNOTSUPP;
 	}
 
-	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
-	    !tunnel_offload_supported(dev->mdev)) {
-		mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
-		return -EOPNOTSUPP;
-	}
-
 	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
 	    !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
 		mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
 		return -EOPNOTSUPP;
 	}
 
-	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->is_rep) {
-		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+	if (dev->is_rep)
 		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
-	}
 
-	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
+	if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+	if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
-		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
-	}
 
 	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
 	if (err) {
@@ -1959,11 +1945,6 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
 	return atomic_mode;
 }
 
-static inline bool check_flags_mask(uint64_t input, uint64_t supported)
-{
-	return (input & ~supported) == 0;
-}
-
 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct mlx5_ib_create_qp *ucmd,
@@ -1999,63 +1980,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
 
 	if (udata) {
-		if (!check_flags_mask(ucmd->flags,
-				      MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
-				      MLX5_QP_FLAG_BFREG_INDEX |
-				      MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
-				      MLX5_QP_FLAG_SCATTER_CQE |
-				      MLX5_QP_FLAG_SIGNATURE |
-				      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
-				      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
-				      MLX5_QP_FLAG_TUNNEL_OFFLOADS |
-				      MLX5_QP_FLAG_UAR_PAGE_INDEX |
-				      MLX5_QP_FLAG_TYPE_DCI |
-				      MLX5_QP_FLAG_TYPE_DCT))
-			return -EINVAL;
-
 		err = get_qp_user_index(ucontext, ucmd, udata->inlen, &uidx);
 		if (err)
 			return err;
-
-		if (ucmd->flags & MLX5_QP_FLAG_SIGNATURE)
-			qp->flags_en |= MLX5_QP_FLAG_SIGNATURE;
-		if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE &&
-		    MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
-			qp->flags_en |= MLX5_QP_FLAG_SCATTER_CQE;
-
-		if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
-			if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
-			    !tunnel_offload_supported(mdev)) {
-				mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
-				return -EOPNOTSUPP;
-			}
-			qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
-		}
-
-		if (ucmd->flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
-			if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
-				mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
-				return -EOPNOTSUPP;
-			}
-			qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
-		}
-
-		if (ucmd->flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
-			if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
-				mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
-				return -EOPNOTSUPP;
-			}
-			qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
-		}
-
-		if (ucmd->flags & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) {
-			if (init_attr->qp_type != IB_QPT_RC ||
-				!MLX5_CAP_GEN(dev->mdev, qp_packet_based)) {
-				mlx5_ib_dbg(dev, "packet based credit mode isn't supported\n");
-				return -EOPNOTSUPP;
-			}
-			qp->flags_en |= MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE;
-		}
 	}
 
 	if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
@@ -2474,7 +2401,7 @@ static int create_dct(struct ib_pd *pd, struct mlx5_ib_qp *qp,
 	MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
 	MLX5_SET(dctc, dctc, user_index, uidx);
 
-	if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE) {
+	if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) {
 		int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq);
 
 		if (rcqe_sz == 128)
@@ -2577,22 +2504,81 @@ static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	return 0;
 }
 
-static int process_vendor_flags(struct mlx5_ib_qp *qp,
+static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
+				bool cond, struct mlx5_ib_qp *qp)
+{
+	if (!(*flags & flag))
+		return;
+
+	if (cond) {
+		qp->flags_en |= flag;
+		*flags &= ~flag;
+		return;
+	}
+
+	if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+	/*
+	 * Don't return an error if this flag was provided but
+	 * the device lacks the right capability; ignore it.
+	 */
+		*flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+		return;
+	}
+	mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
+}
+
+static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 				struct ib_qp_init_attr *attr,
 				struct mlx5_ib_create_qp *ucmd)
 {
-	switch (ucmd->flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) {
+	struct mlx5_core_dev *mdev = dev->mdev;
+	int flags = ucmd->flags;
+	bool cond;
+
+	switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) {
 	case MLX5_QP_FLAG_TYPE_DCI:
 		qp->qp_sub_type = MLX5_IB_QPT_DCI;
 		break;
 	case MLX5_QP_FLAG_TYPE_DCT:
 		qp->qp_sub_type = MLX5_IB_QPT_DCT;
-		break;
+		fallthrough;
 	default:
+		break;
+	}
+
+	if (attr->qp_type == IB_QPT_DRIVER && !qp->qp_sub_type)
 		return -EINVAL;
+
+	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
+	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
+
+	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
+	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
+			    MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+
+	if (attr->qp_type == IB_QPT_RAW_PACKET) {
+		cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
+		       MLX5_CAP_ETH(mdev, tunnel_stateless_gre) ||
+		       MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx);
+		process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS,
+				    cond, qp);
+		process_vendor_flag(dev, &flags,
+				    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true,
+				    qp);
+		process_vendor_flag(dev, &flags,
+				    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true,
+				    qp);
 	}
 
-	return 0;
+	if (attr->qp_type == IB_QPT_RC)
+		process_vendor_flag(dev, &flags,
+				    MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE,
+				    MLX5_CAP_GEN(mdev, qp_packet_based), qp);
+
+	if (flags)
+		mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags);
+
+	return (flags) ? -EINVAL : 0;
 }
 
 static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
@@ -2774,8 +2760,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 	if (!qp)
 		return ERR_PTR(-ENOMEM);
 
-	if (init_attr->qp_type == IB_QPT_DRIVER) {
-		err = process_vendor_flags(qp, init_attr, &ucmd);
+	if (udata) {
+		err = process_vendor_flags(dev, qp, init_attr, &ucmd);
 		if (err)
 			goto free_qp;
 	}
-- 
2.25.3
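
One deliberate exception in the patch: MLX5_QP_FLAG_SCATTER_CQE is
dropped silently rather than rejected when the sctr_data_cqe
capability is missing, preserving its best-effort semantics. In terms
of the sketch in the commit message, that is a third outcome inside
process_flag(), added after the cond block (again hypothetical names):

	/* Best-effort flag: clear the bit, record nothing, succeed. */
	if (flag == QP_FLAG_SCATTER_CQE) {
		*flags &= ~flag;
		return;
	}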


Thread overview: 39+ messages
2020-04-27 15:46 [PATCH rdma-next v1 00/36] Refactor mlx5_ib_create_qp Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 01/36] RDMA/mlx5: Organize QP types checks in one place Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 02/36] RDMA/mlx5: Delete impossible GSI port check Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 03/36] RDMA/mlx5: Perform check if QP creation flow is valid Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 04/36] RDMA/mlx5: Prepare QP allocation for future removal Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 05/36] RDMA/mlx5: Avoid setting redundant NULL for XRC QPs Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 06/36] RDMA/mlx5: Set QP subtype immediately when it is known Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 07/36] RDMA/mlx5: Separate create QP flows to be based on type Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 08/36] RDMA/mlx5: Split scatter CQE configuration for DCT QP Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 09/36] RDMA/mlx5: Update all DRIVER QP places to use QP subtype Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 10/36] RDMA/mlx5: Move DRIVER QP flags check into separate function Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 11/36] RDMA/mlx5: Remove second copy from user for non RSS RAW QPs Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 12/36] RDMA/mlx5: Initial separation of RAW_PACKET QP from common flow Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 13/36] RDMA/mlx5: Delete create QP flags obfuscation Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 14/36] RDMA/mlx5: Process create QP flags in one place Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 15/36] RDMA/mlx5: Use flags_en mechanism to mark QP created with WQE signature Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 16/36] RDMA/mlx5: Change scatter CQE flag to be set like other vendor flags Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 17/36] RDMA/mlx5: Return all configured create flags through query QP Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 18/36] RDMA/mlx5: Process all vendor flags in one place Leon Romanovsky [this message]
2020-04-27 15:46 ` [PATCH rdma-next v1 19/36] RDMA/mlx5: Delete unsupported QP types Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 20/36] RDMA/mlx5: Store QP type in the vendor QP structure Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 21/36] RDMA/mlx5: Promote RSS RAW QP attribute check in higher level Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 22/36] RDMA/mlx5: Combine copy of create QP command in RSS RAW QP Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 23/36] RDMA/mlx5: Remove second user copy in create_user_qp Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 24/36] RDMA/mlx5: Rely on existence of udata to separate kernel/user flows Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 25/36] RDMA/mlx5: Delete impossible inlen check Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 26/36] RDMA/mlx5: Globally parse DEVX UID Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 27/36] RDMA/mlx5: Separate XRC_TGT QP creation from common flow Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 28/36] RDMA/mlx5: Separate to user/kernel create QP flows Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 29/36] RDMA/mlx5: Reduce amount of duplication in QP destroy Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 30/36] RDMA/mlx5: Group all create QP parameters to simplify in-kernel interfaces Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 31/36] RDMA/mlx5: Promote RSS RAW QP flags check to higher level Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 32/36] RDMA/mlx5: Handle udate outlen checks in one place Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 33/36] RDMA/mlx5: Copy response to the user in one place Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 34/36] RDMA/mlx5: Remove redundant destroy QP call Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 35/36] RDMA/mlx5: Consolidate into special function all create QP calls Leon Romanovsky
2020-04-27 15:46 ` [PATCH rdma-next v1 36/36] RDMA/mlx5: Verify that QP is created with RQ or SQ Leon Romanovsky
2020-04-29  0:52 ` [PATCH rdma-next v1 00/36] Refactor mlx5_ib_create_qp Jason Gunthorpe
2020-04-30 23:13 ` Jason Gunthorpe
