From: Kamal Heib <kamalheib1@gmail.com>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@ziepe.ca>
Cc: linux-kernel@vger.kernel.org, kamalheib1@gmail.com
Subject: [PATCH rdma-next 09/18] RDMA/mlx5: Initialize ib_device_ops struct
Date: Tue, 9 Oct 2018 19:28:08 +0300 [thread overview]
Message-ID: <20181009162817.4635-10-kamalheib1@gmail.com> (raw)
In-Reply-To: <20181009162817.4635-1-kamalheib1@gmail.com>
Initialize the ib_device_ops structures with the supported operations and
register them via ib_set_device_ops(). The existing direct assignments to
dev->ib_dev are kept in place for now and will be removed once all callers
are converted to use ib_device_ops.
Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
---
drivers/infiniband/hw/mlx5/main.c | 126 +++++++++++++++++++++++++++++++++++++-
1 file changed, 125 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b3294a7e3ff9..1d2b8f4b2904 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -5760,6 +5760,92 @@ static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
kfree(dev->flow_db);
}
+static struct ib_device_ops mlx5_ib_dev_ops = {
+ .query_device = mlx5_ib_query_device,
+ .get_link_layer = mlx5_ib_port_link_layer,
+ .query_gid = mlx5_ib_query_gid,
+ .add_gid = mlx5_ib_add_gid,
+ .del_gid = mlx5_ib_del_gid,
+ .query_pkey = mlx5_ib_query_pkey,
+ .modify_device = mlx5_ib_modify_device,
+ .modify_port = mlx5_ib_modify_port,
+ .alloc_ucontext = mlx5_ib_alloc_ucontext,
+ .dealloc_ucontext = mlx5_ib_dealloc_ucontext,
+ .mmap = mlx5_ib_mmap,
+ .alloc_pd = mlx5_ib_alloc_pd,
+ .dealloc_pd = mlx5_ib_dealloc_pd,
+ .create_ah = mlx5_ib_create_ah,
+ .query_ah = mlx5_ib_query_ah,
+ .destroy_ah = mlx5_ib_destroy_ah,
+ .create_srq = mlx5_ib_create_srq,
+ .modify_srq = mlx5_ib_modify_srq,
+ .query_srq = mlx5_ib_query_srq,
+ .destroy_srq = mlx5_ib_destroy_srq,
+ .post_srq_recv = mlx5_ib_post_srq_recv,
+ .create_qp = mlx5_ib_create_qp,
+ .modify_qp = mlx5_ib_modify_qp,
+ .query_qp = mlx5_ib_query_qp,
+ .destroy_qp = mlx5_ib_destroy_qp,
+ .drain_sq = mlx5_ib_drain_sq,
+ .drain_rq = mlx5_ib_drain_rq,
+ .post_send = mlx5_ib_post_send,
+ .post_recv = mlx5_ib_post_recv,
+ .create_cq = mlx5_ib_create_cq,
+ .modify_cq = mlx5_ib_modify_cq,
+ .resize_cq = mlx5_ib_resize_cq,
+ .destroy_cq = mlx5_ib_destroy_cq,
+ .poll_cq = mlx5_ib_poll_cq,
+ .req_notify_cq = mlx5_ib_arm_cq,
+ .get_dma_mr = mlx5_ib_get_dma_mr,
+ .reg_user_mr = mlx5_ib_reg_user_mr,
+ .rereg_user_mr = mlx5_ib_rereg_user_mr,
+ .dereg_mr = mlx5_ib_dereg_mr,
+ .attach_mcast = mlx5_ib_mcg_attach,
+ .detach_mcast = mlx5_ib_mcg_detach,
+ .process_mad = mlx5_ib_process_mad,
+ .alloc_mr = mlx5_ib_alloc_mr,
+ .map_mr_sg = mlx5_ib_map_mr_sg,
+ .check_mr_status = mlx5_ib_check_mr_status,
+ .get_dev_fw_str = get_dev_fw_str,
+ .get_vector_affinity = mlx5_ib_get_vector_affinity,
+ .disassociate_ucontext = mlx5_ib_disassociate_ucontext,
+ .create_flow = mlx5_ib_create_flow,
+ .destroy_flow = mlx5_ib_destroy_flow,
+ .create_flow_action_esp = mlx5_ib_create_flow_action_esp,
+ .destroy_flow_action = mlx5_ib_destroy_flow_action,
+ .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
+ .create_counters = mlx5_ib_create_counters,
+ .destroy_counters = mlx5_ib_destroy_counters,
+ .read_counters = mlx5_ib_read_counters,
+};
+
+static struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
+ .alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev,
+};
+
+static struct ib_device_ops mlx5_ib_dev_sriov_ops = {
+ .get_vf_config = mlx5_ib_get_vf_config,
+ .set_vf_link_state = mlx5_ib_set_vf_link_state,
+ .get_vf_stats = mlx5_ib_get_vf_stats,
+ .set_vf_guid = mlx5_ib_set_vf_guid,
+};
+
+static struct ib_device_ops mlx5_ib_dev_mw_ops = {
+ .alloc_mw = mlx5_ib_alloc_mw,
+ .dealloc_mw = mlx5_ib_dealloc_mw,
+};
+
+static struct ib_device_ops mlx5_ib_dev_xrc_ops = {
+ .alloc_xrcd = mlx5_ib_alloc_xrcd,
+ .dealloc_xrcd = mlx5_ib_dealloc_xrcd,
+};
+
+static struct ib_device_ops mlx5_ib_dev_dm_ops = {
+ .alloc_dm = mlx5_ib_alloc_dm,
+ .dealloc_dm = mlx5_ib_dealloc_dm,
+ .reg_dm_mr = mlx5_ib_reg_dm_mr,
+};
+
int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
@@ -5847,14 +5933,18 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
- if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
+ if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
+ ib_set_device_ops(&dev->ib_dev,
+ &mlx5_ib_dev_ipoib_enhanced_ops);
+ }
if (mlx5_core_is_pf(mdev)) {
dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
}
dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
@@ -5864,6 +5954,7 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
if (MLX5_CAP_GEN(mdev, imaicl)) {
dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
dev->ib_dev.uverbs_cmd_mask |=
(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
@@ -5872,6 +5963,7 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
if (MLX5_CAP_GEN(mdev, xrc)) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
dev->ib_dev.uverbs_cmd_mask |=
(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
@@ -5881,6 +5973,7 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.alloc_dm = mlx5_ib_alloc_dm;
dev->ib_dev.dealloc_dm = mlx5_ib_dealloc_dm;
dev->ib_dev.reg_dm_mr = mlx5_ib_reg_dm_mr;
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
}
dev->ib_dev.create_flow = mlx5_ib_create_flow;
@@ -5895,6 +5988,7 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.create_counters = mlx5_ib_create_counters;
dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters;
dev->ib_dev.read_counters = mlx5_ib_read_counters;
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
err = init_node_data(dev);
if (err)
@@ -5908,22 +6002,45 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
return 0;
}
+static struct ib_device_ops mlx5_ib_dev_port_ops = {
+ .get_port_immutable = mlx5_port_immutable,
+ .query_port = mlx5_ib_query_port,
+};
+
static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
{
dev->ib_dev.get_port_immutable = mlx5_port_immutable;
dev->ib_dev.query_port = mlx5_ib_query_port;
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
+
return 0;
}
+static struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
+ .get_port_immutable = mlx5_port_rep_immutable,
+ .query_port = mlx5_ib_rep_query_port,
+};
+
int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
{
dev->ib_dev.get_port_immutable = mlx5_port_rep_immutable;
dev->ib_dev.query_port = mlx5_ib_rep_query_port;
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
+
return 0;
}
+static struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
+ .get_netdev = mlx5_ib_get_netdev,
+ .create_wq = mlx5_ib_create_wq,
+ .modify_wq = mlx5_ib_modify_wq,
+ .destroy_wq = mlx5_ib_destroy_wq,
+ .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
+ .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
+};
+
static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
{
u8 port_num;
@@ -5942,6 +6059,7 @@ static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
dev->ib_dev.uverbs_ex_cmd_mask |=
(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
@@ -6041,11 +6159,17 @@ static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
return mlx5_ib_odp_init_one(dev);
}
+static struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
+ .get_hw_stats = mlx5_ib_get_hw_stats,
+ .alloc_hw_stats = mlx5_ib_alloc_hw_stats,
+};
+
int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
return mlx5_ib_alloc_counters(dev);
}
--
2.14.4
next prev parent reply other threads:[~2018-10-09 16:29 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-10-09 16:27 [PATCH rdma-next 00/18] RDMA: Add support for ib_device_ops Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 01/18] RDMA/core: Introduce ib_device_ops Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 02/18] RDMA/bnxt_re: Initialize ib_device_ops struct Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 03/18] RDMA/cxgb3: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 04/18] RDMA/cxgb4: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 05/18] RDMA/hfi1: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 06/18] RDMA/hns: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 07/18] RDMA/i40iw: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 08/18] RDMA/mlx4: " Kamal Heib
2018-10-09 16:28 ` Kamal Heib [this message]
2018-10-09 16:28 ` [PATCH rdma-next 10/18] RDMA/mthca: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 11/18] RDMA/nes: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 12/18] RDMA/ocrdma: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 13/18] RDMA/qedr: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 14/18] RDMA/qib: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 15/18] RDMA/usnic: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 16/18] RDMA/vmw_pvrdma: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 17/18] RDMA/rxe: " Kamal Heib
2018-10-09 16:28 ` [PATCH rdma-next 18/18] RDMA: Start use ib_device_ops Kamal Heib
2018-10-09 18:31 ` [PATCH rdma-next 00/18] RDMA: Add support for ib_device_ops Doug Ledford
2018-10-09 18:44 ` Kamal Heib
[not found] ` <CALEgSQuQyA9JiqaLfC5Un=foTeDHQG6EFJSCqBLTevD1KKKBhA@mail.gmail.com>
2018-10-09 19:01 ` Doug Ledford
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20181009162817.4635-10-kamalheib1@gmail.com \
--to=kamalheib1@gmail.com \
--cc=dledford@redhat.com \
--cc=jgg@ziepe.ca \
--cc=linux-kernel@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox