From: Leon Romanovsky <leon@kernel.org>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@nvidia.com>
Cc: Leon Romanovsky <leonro@mellanox.com>,
linux-rdma@vger.kernel.org, Mark Zhang <markz@nvidia.com>
Subject: [PATCH rdma-next v2 12/14] RDMA/restrack: Support all QP types
Date: Mon, 7 Sep 2020 15:21:54 +0300
Message-ID: <20200907122156.478360-13-leon@kernel.org>
In-Reply-To: <20200907122156.478360-1-leon@kernel.org>
From: Leon Romanovsky <leonro@mellanox.com>
The latest changes in restrack name handling make it possible to simplify
the QP creation code so that it supports all QP types. For example, XRC
QPs are now presented by the inbox rdma tool:
[leonro@vm ~]$ ibv_xsrq_pingpong &
[leonro@vm ~]$ rdma res show qp
link ibp0s9/1 lqpn 0 type SMI state RTS sq-psn 0 comm [ib_core]
link ibp0s9/1 lqpn 1 type GSI state RTS sq-psn 0 comm [ib_core]
link ibp0s9/1 lqpn 7 type UD state RTS sq-psn 0 comm [mlx5_ib]
link ibp0s9/1 lqpn 42 type XRC_TGT state INIT sq-psn 0 path-mig-state MIGRATED comm [ib_uverbs]
link ibp0s9/1 lqpn 43 type XRC_INI state INIT sq-psn 0 path-mig-state MIGRATED pdn 197 pid 419 comm ibv_xsrq_pingpong
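Kernel consumers keep calling ib_create_qp(); with this patch it becomes a
static inline that forwards KBUILD_MODNAME as the restrack owner name (see
the ib_verbs.h hunk below), so kernel-created QPs appear under their module
name as in the output above. A minimal, hypothetical caller for
illustration only (the pd/cq setup is assumed and not part of this patch):

	/* Hypothetical ULP snippet: ib_create_qp() now expands to
	 * ib_create_named_qp(pd, &attr, KBUILD_MODNAME), so this QP is
	 * listed by "rdma res show qp" with comm set to the module name.
	 */
	struct ib_qp_init_attr attr = {
		.qp_type = IB_QPT_RC,
		.send_cq = cq,
		.recv_cq = cq,
		.cap = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};
	struct ib_qp *qp = ib_create_qp(pd, &attr);

	if (IS_ERR(qp))
		return PTR_ERR(qp);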
Reviewed-by: Mark Zhang <markz@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
drivers/infiniband/core/core_priv.h | 29 ++++++-------------
drivers/infiniband/core/uverbs_cmd.c | 4 +--
drivers/infiniband/core/uverbs_std_types_qp.c | 4 +--
drivers/infiniband/core/verbs.c | 11 +++----
include/rdma/ib_verbs.h | 10 +++++--
5 files changed, 27 insertions(+), 31 deletions(-)
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 7f4bb09efab8..3962a54f9178 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -318,15 +318,12 @@ struct ib_device *ib_device_get_by_index(const struct net *net, u32 index);
void nldev_init(void);
void nldev_exit(void);
-static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
- struct ib_pd *pd,
- struct ib_qp_init_attr *attr,
- struct ib_udata *udata,
- struct ib_uqp_object *uobj)
+static inline struct ib_qp *
+_ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr, struct ib_udata *udata,
+ struct ib_uqp_object *uobj, const char *caller)
{
- enum ib_qp_type qp_type = attr->qp_type;
struct ib_qp *qp;
- bool is_xrc;
int ret;
if (!dev->ops.create_qp)
@@ -348,7 +345,6 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
qp->srq = attr->srq;
qp->rwq_ind_tbl = attr->rwq_ind_tbl;
qp->event_handler = attr->event_handler;
- qp->qp_type = attr->qp_type;
qp->port = attr->port_num;
atomic_set(&qp->usecnt, 0);
@@ -357,18 +353,11 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
INIT_LIST_HEAD(&qp->sig_mrs);
rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
- /*
- * We don't track XRC QPs for now, because they don't have PD
- * and more importantly they are created internaly by driver,
- * see mlx5 create_dev_resources() as an example.
- */
- is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
- if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
- rdma_restrack_parent_name(&qp->res, &pd->res);
- ret = rdma_restrack_add(&qp->res);
- if (ret)
- goto err;
- }
+ WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
+ rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
+ ret = rdma_restrack_add(&qp->res);
+ if (ret)
+ goto err;
return qp;
err:
rdma_restrack_put(&qp->res);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index d7c532154bbd..432e42b3c931 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1399,8 +1399,8 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (cmd->qp_type == IB_QPT_XRC_TGT)
qp = ib_create_qp(pd, &attr);
else
- qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
- obj);
+ qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata, obj,
+ NULL);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
diff --git a/drivers/infiniband/core/uverbs_std_types_qp.c b/drivers/infiniband/core/uverbs_std_types_qp.c
index 3bf8dcdfe7eb..198c3cd6f8b9 100644
--- a/drivers/infiniband/core/uverbs_std_types_qp.c
+++ b/drivers/infiniband/core/uverbs_std_types_qp.c
@@ -251,8 +251,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE)(
if (attr.qp_type == IB_QPT_XRC_TGT)
qp = ib_create_qp(pd, &attr);
else
- qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
- obj);
+ qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata, obj,
+ NULL);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 7802247c2a67..b3b1805c2e84 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1195,7 +1195,7 @@ static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
}
/**
- * ib_create_qp - Creates a kernel QP associated with the specified protection
+ * ib_create_named_qp - Creates a kernel QP associated with the specified protection
* domain.
* @pd: The protection domain associated with the QP.
* @qp_init_attr: A list of initial attributes required to create the
@@ -1204,8 +1204,9 @@ static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
*
* NOTE: for user qp use ib_create_qp_user with valid udata!
*/
-struct ib_qp *ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr)
+struct ib_qp *ib_create_named_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ const char *caller)
{
struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
struct ib_qp *qp;
@@ -1230,7 +1231,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (qp_init_attr->cap.max_rdma_ctxs)
rdma_rw_init_qp(device, qp_init_attr);
- qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
+ qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
if (IS_ERR(qp))
return qp;
@@ -1296,7 +1297,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ib_create_qp);
+EXPORT_SYMBOL(ib_create_named_qp);
static const struct {
int valid;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index ad2f8dfa2e66..73fb9f2455d9 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -3684,8 +3684,14 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
bad_recv_wr ? : &dummy);
}
-struct ib_qp *ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr);
+struct ib_qp *ib_create_named_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ const char *caller);
+static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr)
+{
+ return ib_create_named_qp(pd, init_attr, KBUILD_MODNAME);
+}
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
--
2.26.2
Thread overview: 32+ messages
2020-09-07 12:21 [PATCH rdma-next v2 00/14] Track memory allocation with restrack DB help Leon Romanovsky
2020-09-07 12:21 ` [PATCH rdma-next v2 01/14] RDMA/cma: Delete from restrack DB after successful destroy Leon Romanovsky
2020-09-17 12:06 ` Jason Gunthorpe
2020-09-17 16:19 ` Leon Romanovsky
2020-09-17 16:32 ` Jason Gunthorpe
2020-09-07 12:21 ` [PATCH rdma-next v2 02/14] RDMA/mlx5: Don't call to restrack recursively Leon Romanovsky
2020-09-07 12:21 ` [PATCH rdma-next v2 03/14] RDMA/mlx4: Provide port number for special QPs Leon Romanovsky
2020-09-07 12:21 ` [PATCH rdma-next v2 04/14] RDMA/restrack: Count references to the verbs objects Leon Romanovsky
2020-09-18 17:00 ` Jason Gunthorpe
2020-09-19 8:30 ` Leon Romanovsky
2020-09-07 12:21 ` [PATCH rdma-next v2 05/14] RDMA/restrack: Simplify restrack tracking in kernel flows Leon Romanovsky
2020-09-07 12:21 ` [PATCH rdma-next v2 06/14] RDMA/restrack: Improve readability in task name management Leon Romanovsky
2020-09-18 23:17 ` Jason Gunthorpe
2020-09-19 8:42 ` Leon Romanovsky
2020-09-07 12:21 ` [PATCH rdma-next v2 07/14] RDMA/cma: Be strict with attaching to CMA device Leon Romanovsky
2020-09-18 23:26 ` Jason Gunthorpe
2020-09-19 9:03 ` Leon Romanovsky
2020-09-24 19:46 ` Jason Gunthorpe
2020-09-07 12:21 ` [PATCH rdma-next v2 08/14] RDMA/core: Allow drivers to disable restrack DB Leon Romanovsky
2020-09-07 12:21 ` [PATCH rdma-next v2 09/14] RDMA/counter: Combine allocation and bind logic Leon Romanovsky
2020-09-07 12:21 ` [PATCH rdma-next v2 10/14] RDMA/restrack: Store all special QPs in restrack DB Leon Romanovsky
2020-09-18 23:30 ` Jason Gunthorpe
2020-09-19 8:27 ` Leon Romanovsky
2020-09-07 12:21 ` [PATCH rdma-next v2 11/14] RDMA/restrack: Make restrack DB mandatory for IB objects Leon Romanovsky
2020-09-18 23:31 ` Jason Gunthorpe
2020-09-19 9:09 ` Leon Romanovsky
2020-09-24 19:50 ` Jason Gunthorpe
2020-09-24 19:59 ` Jason Gunthorpe
2020-09-07 12:21 ` Leon Romanovsky [this message]
2020-09-07 12:21 ` [PATCH rdma-next v2 13/14] RDMA/core: Track device memory MRs Leon Romanovsky
2020-09-24 20:02 ` Jason Gunthorpe
2020-09-07 12:21 ` [PATCH rdma-next v2 14/14] RDMA/restrack: Drop valid restrack field as source of ambiguity Leon Romanovsky