* [PATCH rdma-next v1 1/6] IB/mlx5: Create profile infrastructure to add and remove stages
From: Leon Romanovsky @ 2018-01-01 11:06 UTC (permalink / raw)
To: Doug Ledford, Jason Gunthorpe
Cc: Leon Romanovsky, RDMA mailing list, Mark Bloch
From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Today we have a single function which is used when we add an IB interface;
break this function into multiple functions.
Create stages and a generic mechanism to execute each stage.
This is in preparation for RDMA/IB representors, which might not need
all stages or might do things differently in some of them.
This patch doesn't change any functionality.
Signed-off-by: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Signed-off-by: Leon Romanovsky <leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
---
drivers/infiniband/hw/mlx5/main.c | 344 +++++++++++++++++++++++++----------
drivers/infiniband/hw/mlx5/mlx5_ib.h | 31 ++++
2 files changed, 282 insertions(+), 93 deletions(-)
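The mechanism boils down to a table-driven walk: a profile is an array of stages, each an optional init/cleanup pair; device add runs the init callbacks in order, and remove (or a failed init) unwinds the completed stages in reverse. A minimal standalone sketch of that pattern follows; struct dev, NUM_STAGES, add_stages() and remove_stages() are placeholders for illustration only, the real definitions are in the diff below.

struct dev;					/* stand-in for struct mlx5_ib_dev */

struct stage {
	int  (*init)(struct dev *dev);		/* optional, may be NULL */
	void (*cleanup)(struct dev *dev);	/* optional, may be NULL */
};

#define NUM_STAGES 3				/* illustrative count only */

struct profile {
	struct stage stage[NUM_STAGES];
};

/* Unwind the first 'stage' stages in reverse order of initialization. */
static void remove_stages(struct dev *dev, const struct profile *profile,
			  int stage)
{
	while (stage) {
		stage--;
		if (profile->stage[stage].cleanup)
			profile->stage[stage].cleanup(dev);
	}
}

/* Run each populated init callback in order; on failure, unwind whatever
 * already succeeded and return the error.
 */
static int add_stages(struct dev *dev, const struct profile *profile)
{
	int err;
	int i;

	for (i = 0; i < NUM_STAGES; i++) {
		if (profile->stage[i].init) {
			err = profile->stage[i].init(dev);
			if (err) {
				remove_stages(dev, profile, i);
				return err;
			}
		}
	}
	return 0;
}

A consumer fills a profile with STAGE_CREATE() entries and hands it to the walker; a later representor profile can list fewer stages or substitute different callbacks without touching this logic.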
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 1422db847b3e..97c2b6b25ac8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4024,30 +4024,21 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
return mlx5_get_vector_affinity(dev->mdev, comp_vector);
}
-static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
- struct mlx5_ib_dev *dev;
- enum rdma_link_layer ll;
- int port_type_cap;
+ kfree(dev->port);
+}
+
+static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
const char *name;
int err;
- int i;
-
- port_type_cap = MLX5_CAP_GEN(mdev, port_type);
- ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
-
- printk_once(KERN_INFO "%s", mlx5_version);
-
- dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
- if (!dev)
- return NULL;
-
- dev->mdev = mdev;
dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
GFP_KERNEL);
if (!dev->port)
- goto err_dealloc;
+ return -ENOMEM;
rwlock_init(&dev->roce.netdev_lock);
err = get_port_caps(dev);
@@ -4072,6 +4063,24 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->mdev->priv.eq_table.num_comp_vectors;
dev->ib_dev.dev.parent = &mdev->pdev->dev;
+ return 0;
+
+err_free_port:
+ kfree(dev->port);
+
+ return -ENOMEM;
+}
+
+static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ int err;
+
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
dev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -4215,139 +4224,288 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
}
err = init_node_data(dev);
if (err)
- goto err_free_port;
+ return err;
mutex_init(&dev->flow_db.lock);
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
+ return 0;
+}
+
+static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ int err;
+
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
if (ll == IB_LINK_LAYER_ETHERNET) {
err = mlx5_enable_eth(dev);
if (err)
- goto err_free_port;
+ return err;
dev->roce.last_port_state = IB_PORT_DOWN;
}
- err = create_dev_resources(&dev->devr);
- if (err)
- goto err_disable_eth;
+ return 0;
+}
- err = mlx5_ib_odp_init_one(dev);
- if (err)
- goto err_rsrc;
+static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
- err = mlx5_ib_alloc_counters(dev);
- if (err)
- goto err_odp;
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ mlx5_disable_eth(dev);
+ mlx5_remove_netdev_notifier(dev);
}
+}
- err = mlx5_ib_init_cong_debugfs(dev);
- if (err)
- goto err_cnt;
+static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
+{
+ return create_dev_resources(&dev->devr);
+}
+
+static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
+{
+ destroy_dev_resources(&dev->devr);
+}
+
+static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
+{
+ return mlx5_ib_odp_init_one(dev);
+}
+static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_odp_remove_one(dev);
+}
+
+static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
+{
+ if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+ return mlx5_ib_alloc_counters(dev);
+
+ return 0;
+}
+
+static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+ mlx5_ib_dealloc_counters(dev);
+}
+
+static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
+{
+ return mlx5_ib_init_cong_debugfs(dev);
+}
+
+static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_cleanup_cong_debugfs(dev);
+}
+
+static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
+{
dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
if (!dev->mdev->priv.uar)
- goto err_cong;
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+}
+
+static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
+{
+ int err;
err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
if (err)
- goto err_uar_page;
+ return err;
err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
if (err)
- goto err_bfreg;
+ mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
- err = ib_register_device(&dev->ib_dev, NULL);
- if (err)
- goto err_fp_bfreg;
+ return err;
+}
- err = create_umr_res(dev);
- if (err)
- goto err_dev;
+static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+}
+static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
+{
+ return ib_register_device(&dev->ib_dev, NULL);
+}
+
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+{
+ ib_unregister_device(&dev->ib_dev);
+}
+
+static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+{
+ return create_umr_res(dev);
+}
+
+static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+{
+ destroy_umrc_res(dev);
+}
+
+static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
+{
init_delay_drop(dev);
+ return 0;
+}
+
+static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
+{
+ cancel_delay_drop(dev);
+}
+
+static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
+{
+ int err;
+ int i;
+
for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
err = device_create_file(&dev->ib_dev.dev,
mlx5_class_attributes[i]);
if (err)
- goto err_delay_drop;
+ return err;
}
- if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
- MLX5_CAP_GEN(mdev, disable_local_lb))
+ return 0;
+}
+
+static int mlx5_ib_stage_loopback_init(struct mlx5_ib_dev *dev)
+{
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ MLX5_CAP_GEN(dev->mdev, disable_local_lb))
mutex_init(&dev->lb_mutex);
- dev->ib_active = true;
+ return 0;
+}
- return dev;
+static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile,
+ int stage)
+{
+ /* Number of stages to cleanup */
+ while (stage) {
+ stage--;
+ if (profile->stage[stage].cleanup)
+ profile->stage[stage].cleanup(dev);
+ }
-err_delay_drop:
- cancel_delay_drop(dev);
- destroy_umrc_res(dev);
+ ib_dealloc_device((struct ib_device *)dev);
+}
-err_dev:
- ib_unregister_device(&dev->ib_dev);
+static void *__mlx5_ib_add(struct mlx5_core_dev *mdev,
+ const struct mlx5_ib_profile *profile)
+{
+ struct mlx5_ib_dev *dev;
+ int err;
+ int i;
-err_fp_bfreg:
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ printk_once(KERN_INFO "%s", mlx5_version);
-err_bfreg:
- mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+ dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev)
+ return NULL;
-err_uar_page:
- mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+ dev->mdev = mdev;
-err_cong:
- mlx5_ib_cleanup_cong_debugfs(dev);
-err_cnt:
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
- mlx5_ib_dealloc_counters(dev);
+ for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
+ if (profile->stage[i].init) {
+ err = profile->stage[i].init(dev);
+ if (err)
+ goto err_out;
+ }
+ }
-err_odp:
- mlx5_ib_odp_remove_one(dev);
+ dev->profile = profile;
+ dev->ib_active = true;
-err_rsrc:
- destroy_dev_resources(&dev->devr);
+ return dev;
-err_disable_eth:
- if (ll == IB_LINK_LAYER_ETHERNET) {
- mlx5_disable_eth(dev);
- mlx5_remove_netdev_notifier(dev);
- }
+err_out:
+ __mlx5_ib_remove(dev, profile, i);
-err_free_port:
- kfree(dev->port);
+ return NULL;
+}
-err_dealloc:
- ib_dealloc_device((struct ib_device *)dev);
+static const struct mlx5_ib_profile pf_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ mlx5_ib_stage_init_init,
+ mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ mlx5_ib_stage_caps_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+ mlx5_ib_stage_roce_init,
+ mlx5_ib_stage_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ mlx5_ib_stage_dev_res_init,
+ mlx5_ib_stage_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_ODP,
+ mlx5_ib_stage_odp_init,
+ mlx5_ib_stage_odp_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ mlx5_ib_stage_counters_init,
+ mlx5_ib_stage_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
+ mlx5_ib_stage_cong_debugfs_init,
+ mlx5_ib_stage_cong_debugfs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UAR,
+ mlx5_ib_stage_uar_init,
+ mlx5_ib_stage_uar_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ mlx5_ib_stage_bfrag_init,
+ mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ mlx5_ib_stage_ib_reg_init,
+ mlx5_ib_stage_ib_reg_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
+ mlx5_ib_stage_umr_res_init,
+ mlx5_ib_stage_umr_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
+ mlx5_ib_stage_delay_drop_init,
+ mlx5_ib_stage_delay_drop_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
+ mlx5_ib_stage_class_attr_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_LOOPBACK,
+ mlx5_ib_stage_loopback_init,
+ NULL),
+};
- return NULL;
+static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+{
+ return __mlx5_ib_add(mdev, &pf_profile);
}
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
struct mlx5_ib_dev *dev = context;
- enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
- cancel_delay_drop(dev);
- mlx5_remove_netdev_notifier(dev);
- ib_unregister_device(&dev->ib_dev);
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
- mlx5_free_bfreg(dev->mdev, &dev->bfreg);
- mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
- mlx5_ib_cleanup_cong_debugfs(dev);
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
- mlx5_ib_dealloc_counters(dev);
- destroy_umrc_res(dev);
- mlx5_ib_odp_remove_one(dev);
- destroy_dev_resources(&dev->devr);
- if (ll == IB_LINK_LAYER_ETHERNET)
- mlx5_disable_eth(dev);
- kfree(dev->port);
- ib_dealloc_device(&dev->ib_dev);
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}
static struct mlx5_interface mlx5_ib_interface = {
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 5fabd5807db6..0504ab3b9a06 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -715,6 +715,36 @@ struct mlx5_ib_delay_drop {
struct mlx5_ib_dbg_delay_drop *dbg;
};
+enum mlx5_ib_stages {
+ MLX5_IB_STAGE_INIT,
+ MLX5_IB_STAGE_CAPS,
+ MLX5_IB_STAGE_ROCE,
+ MLX5_IB_STAGE_DEVICE_RESOURCES,
+ MLX5_IB_STAGE_ODP,
+ MLX5_IB_STAGE_COUNTERS,
+ MLX5_IB_STAGE_CONG_DEBUGFS,
+ MLX5_IB_STAGE_UAR,
+ MLX5_IB_STAGE_BFREG,
+ MLX5_IB_STAGE_IB_REG,
+ MLX5_IB_STAGE_UMR_RESOURCES,
+ MLX5_IB_STAGE_DELAY_DROP,
+ MLX5_IB_STAGE_CLASS_ATTR,
+ MLX5_IB_STAGE_LOOPBACK,
+ MLX5_IB_STAGE_MAX,
+};
+
+struct mlx5_ib_stage {
+ int (*init)(struct mlx5_ib_dev *dev);
+ void (*cleanup)(struct mlx5_ib_dev *dev);
+};
+
+#define STAGE_CREATE(_stage, _init, _cleanup) \
+ .stage[_stage] = {.init = _init, .cleanup = _cleanup}
+
+struct mlx5_ib_profile {
+ struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
@@ -753,6 +783,7 @@ struct mlx5_ib_dev {
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ const struct mlx5_ib_profile *profile;
/* protect the user_td */
struct mutex lb_mutex;
--
2.15.1
* [PATCH rdma-next v1 2/6] IB/mlx5: Move RoCE/ETH initialization to the corresponding stage
From: Leon Romanovsky @ 2018-01-01 11:06 UTC (permalink / raw)
To: Doug Ledford, Jason Gunthorpe
Cc: Leon Romanovsky, RDMA mailing list, Mark Bloch
From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Now that we have a stage just for RoCE/ETH, move all relevant
initialization logic into one place.
Signed-off-by: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Signed-off-by: Leon Romanovsky <leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
---
drivers/infiniband/hw/mlx5/main.c | 33 ++++++++++++---------------------
1 file changed, 12 insertions(+), 21 deletions(-)
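With this move the RoCE stage owns every Ethernet-only piece of setup, gated on the port link layer. A condensed sketch of the resulting stage (the WQ/RWQ op assignments and uverbs_ex_cmd_mask bits are abbreviated here; the complete list is in the diff below):

static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
{
	enum rdma_link_layer ll;
	int port_type_cap;
	int err;

	port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		/* Ethernet-only verbs ops are registered here now. */
		dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
		dev->ib_dev.create_wq = mlx5_ib_create_wq;
		/* ... remaining WQ/RWQ ops and uverbs_ex_cmd_mask bits ... */

		err = mlx5_enable_eth(dev);
		if (err)
			return err;
		dev->roce.last_port_state = IB_PORT_DOWN;
	}

	return 0;
}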
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 97c2b6b25ac8..9a5de242b74d 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4074,13 +4074,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
- enum rdma_link_layer ll;
- int port_type_cap;
int err;
- port_type_cap = MLX5_CAP_GEN(mdev, port_type);
- ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
-
dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
dev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -4119,8 +4114,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
- if (ll == IB_LINK_LAYER_ETHERNET)
- dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
dev->ib_dev.query_gid = mlx5_ib_query_gid;
dev->ib_dev.add_gid = mlx5_ib_add_gid;
dev->ib_dev.del_gid = mlx5_ib_del_gid;
@@ -4208,20 +4201,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
- if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
- IB_LINK_LAYER_ETHERNET) {
- dev->ib_dev.create_wq = mlx5_ib_create_wq;
- dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
- dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
- dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
- dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
- dev->ib_dev.uverbs_ex_cmd_mask |=
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
- (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
- }
err = init_node_data(dev);
if (err)
return err;
@@ -4245,6 +4224,18 @@ static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) {
+ dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
+ dev->ib_dev.create_wq = mlx5_ib_create_wq;
+ dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
+ dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
+ dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
+ dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
+ dev->ib_dev.uverbs_ex_cmd_mask |=
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
err = mlx5_enable_eth(dev);
if (err)
return err;
--
2.15.1
* [PATCH rdma-next v1 3/6] IB/mlx5: Move ODP initialization to the corresponding stage
From: Leon Romanovsky @ 2018-01-01 11:07 UTC (permalink / raw)
To: Doug Ledford, Jason Gunthorpe
Cc: Leon Romanovsky, RDMA mailing list, Mark Bloch
From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Now that we have a stage just for ODP, move all relevant
initialization logic into one place.
Signed-off-by: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Signed-off-by: Leon Romanovsky <leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
---
drivers/infiniband/hw/mlx5/main.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9a5de242b74d..07331613f423 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4170,8 +4170,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
- mlx5_ib_internal_fill_odp_caps(dev);
-
dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
if (MLX5_CAP_GEN(mdev, imaicl)) {
@@ -4272,6 +4270,8 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
{
+ mlx5_ib_internal_fill_odp_caps(dev);
+
return mlx5_ib_odp_init_one(dev);
}
--
2.15.1
* [PATCH rdma-next v1 4/6] IB/mlx5: Move hardware counters initialization to the corresponding stage
From: Leon Romanovsky @ 2018-01-01 11:07 UTC (permalink / raw)
To: Doug Ledford, Jason Gunthorpe
Cc: Leon Romanovsky, RDMA mailing list, Mark Bloch
From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Now that we have a stage just for hardware counters, move all relevant
initialization logic into one place.
Signed-off-by: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Signed-off-by: Leon Romanovsky <leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
---
drivers/infiniband/hw/mlx5/main.c | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 07331613f423..1a409e7ade80 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4180,11 +4180,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
}
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
- dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
- dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
- }
-
if (MLX5_CAP_GEN(mdev, xrc)) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
@@ -4282,8 +4277,12 @@ static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
{
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
- return mlx5_ib_alloc_counters(dev);
+ if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
+ dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
+ dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
+
+ return mlx5_ib_alloc_counters(dev);
+ }
return 0;
}
--
2.15.1
* [PATCH rdma-next v1 5/6] IB/mlx5: Move loopback initialization to the corresponding stage
From: Leon Romanovsky @ 2018-01-01 11:07 UTC (permalink / raw)
To: Doug Ledford, Jason Gunthorpe
Cc: Leon Romanovsky, RDMA mailing list, Mark Bloch
From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
The loopback stage only initializes a lock; move it into the CAPS
initialization stage and get rid of the loopback stage completely.
Signed-off-by: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Signed-off-by: Leon Romanovsky <leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
---
drivers/infiniband/hw/mlx5/main.c | 16 ++++------------
drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 -
2 files changed, 4 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 1a409e7ade80..b8952fff1fc8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4203,6 +4203,10 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+ mutex_init(&dev->lb_mutex);
+
return 0;
}
@@ -4384,15 +4388,6 @@ static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
return 0;
}
-static int mlx5_ib_stage_loopback_init(struct mlx5_ib_dev *dev)
-{
- if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
- MLX5_CAP_GEN(dev->mdev, disable_local_lb))
- mutex_init(&dev->lb_mutex);
-
- return 0;
-}
-
static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
int stage)
@@ -4481,9 +4476,6 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
mlx5_ib_stage_class_attr_init,
NULL),
- STAGE_CREATE(MLX5_IB_STAGE_LOOPBACK,
- mlx5_ib_stage_loopback_init,
- NULL),
};
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 0504ab3b9a06..768fa7334100 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -729,7 +729,6 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_UMR_RESOURCES,
MLX5_IB_STAGE_DELAY_DROP,
MLX5_IB_STAGE_CLASS_ATTR,
- MLX5_IB_STAGE_LOOPBACK,
MLX5_IB_STAGE_MAX,
};
--
2.15.1
* [PATCH rdma-next v1 6/6] IB/mlx5: Move locks initialization to the corresponding stage
From: Leon Romanovsky @ 2018-01-01 11:07 UTC (permalink / raw)
To: Doug Ledford, Jason Gunthorpe
Cc: Leon Romanovsky, RDMA mailing list, Mark Bloch
From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Unconditional locks/list and ODP srcu initialization should be done in
the INIT stage. Remove those from the CAPS stage and move them to the
proper stage.
Signed-off-by: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Signed-off-by: Leon Romanovsky <leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
---
drivers/infiniband/hw/mlx5/main.c | 26 +++++++++++++++-----------
drivers/infiniband/hw/mlx5/mlx5_ib.h | 2 --
drivers/infiniband/hw/mlx5/odp.c | 9 ---------
3 files changed, 15 insertions(+), 22 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b8952fff1fc8..b9e195d154b1 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4026,6 +4026,9 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ cleanup_srcu_struct(&dev->mr_srcu);
+#endif
kfree(dev->port);
}
@@ -4063,6 +4066,17 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->mdev->priv.eq_table.num_comp_vectors;
dev->ib_dev.dev.parent = &mdev->pdev->dev;
+ mutex_init(&dev->flow_db.lock);
+ mutex_init(&dev->cap_mask_mutex);
+ INIT_LIST_HEAD(&dev->qp_list);
+ spin_lock_init(&dev->reset_flow_resource_lock);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ err = init_srcu_struct(&dev->mr_srcu);
+ if (err)
+ goto err_free_port;
+#endif
+
return 0;
err_free_port:
@@ -4198,11 +4212,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
if (err)
return err;
- mutex_init(&dev->flow_db.lock);
- mutex_init(&dev->cap_mask_mutex);
- INIT_LIST_HEAD(&dev->qp_list);
- spin_lock_init(&dev->reset_flow_resource_lock);
-
if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
MLX5_CAP_GEN(dev->mdev, disable_local_lb))
mutex_init(&dev->lb_mutex);
@@ -4274,11 +4283,6 @@ static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
return mlx5_ib_odp_init_one(dev);
}
-static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
-{
- mlx5_ib_odp_remove_one(dev);
-}
-
static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
@@ -4451,7 +4455,7 @@ static const struct mlx5_ib_profile pf_profile = {
mlx5_ib_stage_dev_res_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_ODP,
mlx5_ib_stage_odp_init,
- mlx5_ib_stage_odp_cleanup),
+ NULL),
STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
mlx5_ib_stage_counters_init,
mlx5_ib_stage_counters_cleanup),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 768fa7334100..b3f2f5cae672 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -998,7 +998,6 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
-void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
@@ -1013,7 +1012,6 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
-static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index e2197bdda89c..f1a87a690a4c 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1207,10 +1207,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
int ret;
- ret = init_srcu_struct(&dev->mr_srcu);
- if (ret)
- return ret;
-
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
if (ret) {
@@ -1222,11 +1218,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
return 0;
}
-void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *dev)
-{
- cleanup_srcu_struct(&dev->mr_srcu);
-}
-
int mlx5_ib_odp_init(void)
{
mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
--
2.15.1
* Re: [PATCH rdma-next v1 0/6] mlx5 profile infrastructure to add and remove stages
From: Jason Gunthorpe @ 2018-01-04 3:04 UTC (permalink / raw)
To: Leon Romanovsky; +Cc: Doug Ledford, RDMA mailing list, Mark Bloch
On Mon, Jan 01, 2018 at 01:06:57PM +0200, Leon Romanovsky wrote:
> Changelog:
> v0->v1:
> * Moved ODP srcu initialization to be part of init stage.
> * Added const to mlx5_ib_profile struct declaration.
> * Remove ODP srcu cleanup callback.
>
> Mark's patches continue the e-switch series introduced in the shared pull
> request and refactor the mlx5_ib initialization and cleanup logic to work in
> stages.
>
> Such modular separation provides an easy way to construct any initialization
> sequence and skip unneeded stages, as we will have in the RDMA/IB representors code.
>
> The patches are available in the git repository at:
> git.kernel.org/pub/scm/linux/kernel/git/leon/linux-rdma.git tags/rdma-next-2018-01-01
>
> Thanks
>
> Mark Bloch (6):
> IB/mlx5: Create profile infrastructure to add and remove stages
> IB/mlx5: Move RoCE/ETH initialization to the corresponding stage
> IB/mlx5: Move ODP initialization to the corresponding stage
> IB/mlx5: Move hardware counters initialization to the corresponding
> stage
> IB/mlx5: Move loopback initialization to the corresponding stage
> IB/mlx5: Move locks initialization to the corresponding stage
>
> drivers/infiniband/hw/mlx5/main.c | 372 ++++++++++++++++++++++++-----------
> drivers/infiniband/hw/mlx5/mlx5_ib.h | 32 ++-
> drivers/infiniband/hw/mlx5/odp.c | 9 -
> 3 files changed, 288 insertions(+), 125 deletions(-)
Applied to for-next, thanks
Jason