From: Leon Romanovsky <leon@kernel.org>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@mellanox.com>
Cc: Leon Romanovsky <leonro@mellanox.com>,
RDMA mailing list <linux-rdma@vger.kernel.org>,
Yishai Hadas <yishaih@mellanox.com>,
Saeed Mahameed <saeedm@mellanox.com>,
linux-netdev <netdev@vger.kernel.org>
Subject: [PATCH rdma-next v1 08/12] IB/mlx5: Introduce MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD
Date: Tue, 18 Jun 2019 20:15:36 +0300
Message-ID: <20190618171540.11729-9-leon@kernel.org>
In-Reply-To: <20190618171540.11729-1-leon@kernel.org>
From: Yishai Hadas <yishaih@mellanox.com>
Introduce MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD and its initial
implementation.

This object is of the FD type class and will be used to read DEVX
asynchronous events.
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
drivers/infiniband/hw/mlx5/devx.c | 112 ++++++++++++++++++++--
include/uapi/rdma/mlx5_user_ioctl_cmds.h | 10 ++
include/uapi/rdma/mlx5_user_ioctl_verbs.h | 4 +
3 files changed, 116 insertions(+), 10 deletions(-)
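
Usage illustration only: a minimal sketch of how userspace could drive the
new MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC method through its expected
rdma-core counterpart. The mlx5dv_devx_create_event_channel() /
mlx5dv_devx_destroy_event_channel() helpers and the channel struct below
are assumptions about the matching userspace series, not part of this
kernel patch; with this patch alone read() on the FD returns -EINVAL and
poll() reports no events, as event dispatching arrives later in the series.

    #include <poll.h>
    #include <infiniband/mlx5dv.h>

    /*
     * Hedged sketch, assuming the rdma-core counterpart of the new
     * method is exposed as mlx5dv_devx_create_event_channel().
     */
    static int devx_wait_for_event(struct ibv_context *ctx)
    {
            struct mlx5dv_devx_event_channel *ch;
            struct pollfd pfd;
            int ret;

            /*
             * Passing 0 keeps event data; the OMIT_EV_DATA flag added by
             * this patch would request that the event payload be omitted.
             */
            ch = mlx5dv_devx_create_event_channel(ctx, 0);
            if (!ch)
                    return -1;

            /*
             * Event subscription on ch->fd is added by later patches in
             * this series; only the FD allocation exists at this point.
             */
            pfd.fd = ch->fd;
            pfd.events = POLLIN;
            ret = poll(&pfd, 1, -1);

            mlx5dv_devx_destroy_event_channel(ch);
            return ret < 0 ? -1 : 0;
    }
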
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 80b42d069328..1815ce0f8daf 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -33,6 +33,24 @@ struct devx_async_data {
struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};
+struct devx_async_event_queue {
+ spinlock_t lock;
+ wait_queue_head_t poll_wait;
+ struct list_head event_list;
+ atomic_t bytes_in_use;
+ u8 is_destroyed:1;
+ u32 flags;
+};
+
+struct devx_async_event_file {
+ struct ib_uobject uobj;
+ struct list_head subscribed_events_list; /* Head of events that are
+ * subscribed to this FD
+ */
+ struct devx_async_event_queue ev_queue;
+ struct mlx5_ib_dev *dev;
+};
+
#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
struct mlx5_core_dev *mdev;
@@ -1336,27 +1354,21 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
cmd_out, cmd_out_len);
}
-struct devx_async_event_queue {
- spinlock_t lock;
- wait_queue_head_t poll_wait;
- struct list_head event_list;
- atomic_t bytes_in_use;
- u8 is_destroyed:1;
-};
-
struct devx_async_cmd_event_file {
struct ib_uobject uobj;
struct devx_async_event_queue ev_queue;
struct mlx5_async_ctx async_ctx;
};
-static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
+static void devx_init_event_queue(struct devx_async_event_queue *ev_queue,
+ u32 flags)
{
spin_lock_init(&ev_queue->lock);
INIT_LIST_HEAD(&ev_queue->event_list);
init_waitqueue_head(&ev_queue->poll_wait);
atomic_set(&ev_queue->bytes_in_use, 0);
ev_queue->is_destroyed = 0;
+ ev_queue->flags = flags;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
@@ -1370,11 +1382,38 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
ev_file = container_of(uobj, struct devx_async_cmd_event_file,
uobj);
- devx_init_event_queue(&ev_file->ev_queue);
+ devx_init_event_queue(&ev_file->ev_queue, 0);
mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
return 0;
}
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
+ struct devx_async_event_file *ev_file;
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ u32 flags;
+ int err;
+
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
+ MLX5_IB_UAPI_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
+
+ if (err)
+ return err;
+
+ ev_file = container_of(uobj, struct devx_async_event_file,
+ uobj);
+ devx_init_event_queue(&ev_file->ev_queue, flags);
+ INIT_LIST_HEAD(&ev_file->subscribed_events_list);
+ ev_file->dev = dev;
+ return 0;
+}
+
static void devx_query_callback(int status, struct mlx5_async_work *context)
{
struct devx_async_data *async_data =
@@ -1729,6 +1768,32 @@ static const struct file_operations devx_async_cmd_event_fops = {
.llseek = no_llseek,
};
+static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ return -EINVAL;
+}
+
+static __poll_t devx_async_event_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return 0;
+}
+
+static int devx_async_event_close(struct inode *inode, struct file *filp)
+{
+ uverbs_close_fd(filp);
+ return 0;
+}
+
+static const struct file_operations devx_async_event_fops = {
+ .owner = THIS_MODULE,
+ .read = devx_async_event_read,
+ .poll = devx_async_event_poll,
+ .release = devx_async_event_close,
+ .llseek = no_llseek,
+};
+
static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
enum rdma_remove_reason why)
{
@@ -1748,6 +1813,12 @@ static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
return 0;
};
+static int devx_hot_unplug_async_event_file(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
+{
+ return 0;
+};
+
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_DEVX_UMEM_REG,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
@@ -1913,6 +1984,24 @@ DECLARE_UVERBS_NAMED_OBJECT(
O_RDONLY),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
+ UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
+ enum mlx5_ib_uapi_devx_create_event_channel_flags,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
+ devx_hot_unplug_async_event_file,
+ &devx_async_event_fops, "[devx_async_event]",
+ O_RDONLY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
+
static bool devx_is_supported(struct ib_device *device)
{
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -1933,5 +2022,8 @@ const struct uapi_definition mlx5_ib_devx_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
{},
};
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index d404c951954c..6ad8f4f11ddd 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -127,16 +127,26 @@ enum mlx5_ib_devx_async_cmd_fd_alloc_attrs {
MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
};
+enum mlx5_ib_devx_async_event_fd_alloc_attrs {
+ MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
+};
+
enum mlx5_ib_devx_async_cmd_fd_methods {
MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC = (1U << UVERBS_ID_NS_SHIFT),
};
+enum mlx5_ib_devx_async_event_fd_methods {
+ MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC = (1U << UVERBS_ID_NS_SHIFT),
+};
+
enum mlx5_ib_objects {
MLX5_IB_OBJECT_DEVX = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_OBJECT_DEVX_OBJ,
MLX5_IB_OBJECT_DEVX_UMEM,
MLX5_IB_OBJECT_FLOW_MATCHER,
MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
};
enum mlx5_ib_flow_matcher_create_attrs {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index a8f34c237458..57beea4589e4 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -63,5 +63,9 @@ enum mlx5_ib_uapi_dm_type {
MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM,
};
+enum mlx5_ib_uapi_devx_create_event_channel_flags {
+ MLX5_IB_UAPI_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA = 1 << 0,
+};
+
#endif
--
2.20.1