From: Yishai Hadas <yishaih@nvidia.com>
To: <alex.williamson@redhat.com>, <jgg@nvidia.com>
Cc: <saeedm@nvidia.com>, <kvm@vger.kernel.org>,
<netdev@vger.kernel.org>, <kuba@kernel.org>,
<kevin.tian@intel.com>, <joao.m.martins@oracle.com>,
<leonro@nvidia.com>, <yishaih@nvidia.com>, <maorg@nvidia.com>,
<cohuck@redhat.com>
Subject: [PATCH V2 vfio 08/11] vfio/mlx5: Create and destroy page tracker object
Date: Thu, 14 Jul 2022 11:12:48 +0300
Message-ID: <20220714081251.240584-9-yishaih@nvidia.com>
In-Reply-To: <20220714081251.240584-1-yishaih@nvidia.com>
Add support for creating and destroying the page tracker object.
This object is used to control and report the device's dirty pages.
As part of creating the tracker, take the device capability for the
maximum number of ranges into account and combine ranges accordingly.
Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
---
drivers/vfio/pci/mlx5/cmd.c | 147 ++++++++++++++++++++++++++++++++++++
drivers/vfio/pci/mlx5/cmd.h | 1 +
2 files changed, 148 insertions(+)
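
A minimal userspace sketch of the smallest-gap merge policy implemented by
combine_ranges() in the patch below: the pair of adjacent ranges separated by
the smallest gap is merged until the count fits the device's
pg_track_max_num_range limit. The array-based struct range and the main()
driver here are illustrative assumptions only; the kernel code walks an
interval tree and handles the single-range case as a dedicated shortcut.

#include <stdio.h>

struct range {
	unsigned long start;
	unsigned long last;	/* inclusive, as in struct interval_tree_node */
};

/* Merge the neighbours with the smallest gap until only req ranges remain. */
static void combine(struct range *r, int *nr, int req)
{
	while (*nr > req) {
		unsigned long min_gap = ~0UL;
		int merge_idx = 0;
		int i;

		/* Find the pair of adjacent ranges with the smallest gap. */
		for (i = 1; i < *nr; i++) {
			unsigned long gap = r[i].start - r[i - 1].last;

			if (gap < min_gap) {
				min_gap = gap;
				merge_idx = i - 1;
			}
		}

		/* Extend the left range over the right one and drop it. */
		r[merge_idx].last = r[merge_idx + 1].last;
		for (i = merge_idx + 1; i < *nr - 1; i++)
			r[i] = r[i + 1];
		(*nr)--;
	}
}

int main(void)
{
	struct range r[] = {
		{ 0x0000, 0x0fff },
		{ 0x2000, 0x2fff },	/* gap of 0x1001 to the previous range */
		{ 0x3000, 0x3fff },	/* gap of 1, merged first */
		{ 0x9000, 0x9fff },
	};
	int nr = 4, i;

	combine(r, &nr, 2);	/* pretend pg_track_max_num_range == 2 */
	for (i = 0; i < nr; i++)
		printf("range %d: 0x%lx-0x%lx\n", i, r[i].start, r[i].last);
	return 0;
}

With the example input, the two middle ranges merge first (gap of 1), then the
result merges with the first range, leaving 0x0-0x3fff and 0x9000-0x9fff.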
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 0a362796d567..f1cad96af6ab 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -410,6 +410,148 @@ int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
return err;
}
+static void combine_ranges(struct rb_root_cached *root, u32 cur_nodes,
+ u32 req_nodes)
+{
+ struct interval_tree_node *prev, *curr, *comb_start, *comb_end;
+ unsigned long min_gap;
+ unsigned long curr_gap;
+
+ /* Special shortcut when a single range is required */
+ if (req_nodes == 1) {
+ unsigned long last;
+
+ curr = comb_start = interval_tree_iter_first(root, 0, ULONG_MAX);
+ while (curr) {
+ last = curr->last;
+ prev = curr;
+ curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
+ if (prev != comb_start)
+ interval_tree_remove(prev, root);
+ }
+ comb_start->last = last;
+ return;
+ }
+
+ /* Combine ranges which have the smallest gap */
+ while (cur_nodes > req_nodes) {
+ prev = NULL;
+ min_gap = ULONG_MAX;
+ curr = interval_tree_iter_first(root, 0, ULONG_MAX);
+ while (curr) {
+ if (prev) {
+ curr_gap = curr->start - prev->last;
+ if (curr_gap < min_gap) {
+ min_gap = curr_gap;
+ comb_start = prev;
+ comb_end = curr;
+ }
+ }
+ prev = curr;
+ curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
+ }
+ comb_start->last = comb_end->last;
+ interval_tree_remove(comb_end, root);
+ cur_nodes--;
+ }
+}
+
+static int mlx5vf_create_tracker(struct mlx5_core_dev *mdev,
+ struct mlx5vf_pci_core_device *mvdev,
+ struct rb_root_cached *ranges, u32 nnodes)
+{
+ int max_num_range =
+ MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_max_num_range);
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ int record_size = MLX5_ST_SZ_BYTES(page_track_range);
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ struct interval_tree_node *node = NULL;
+ u64 total_ranges_len = 0;
+ u32 num_ranges = nnodes;
+ u8 log_addr_space_size;
+ void *range_list_ptr;
+ void *obj_context;
+ void *cmd_hdr;
+ int inlen;
+ void *in;
+ int err;
+ int i;
+
+ if (num_ranges > max_num_range) {
+ combine_ranges(ranges, nnodes, max_num_range);
+ num_ranges = max_num_range;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_page_track_obj_in) +
+ record_size * num_ranges;
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cmd_hdr = MLX5_ADDR_OF(create_page_track_obj_in, in,
+ general_obj_in_cmd_hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type,
+ MLX5_OBJ_TYPE_PAGE_TRACK);
+ obj_context = MLX5_ADDR_OF(create_page_track_obj_in, in, obj_context);
+ MLX5_SET(page_track, obj_context, vhca_id, mvdev->vhca_id);
+ MLX5_SET(page_track, obj_context, track_type, 1);
+ MLX5_SET(page_track, obj_context, log_page_size,
+ ilog2(tracker->host_qp->tracked_page_size));
+ MLX5_SET(page_track, obj_context, log_msg_size,
+ ilog2(tracker->host_qp->max_msg_size));
+ MLX5_SET(page_track, obj_context, reporting_qpn, tracker->fw_qp->qpn);
+ MLX5_SET(page_track, obj_context, num_ranges, num_ranges);
+
+ range_list_ptr = MLX5_ADDR_OF(page_track, obj_context, track_range);
+ node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
+ for (i = 0; i < num_ranges; i++) {
+ void *addr_range_i_base = range_list_ptr + record_size * i;
+ unsigned long length = node->last - node->start;
+
+ MLX5_SET64(page_track_range, addr_range_i_base, start_address,
+ node->start);
+ MLX5_SET64(page_track_range, addr_range_i_base, length, length);
+ total_ranges_len += length;
+ node = interval_tree_iter_next(node, 0, ULONG_MAX);
+ }
+
+ WARN_ON(node);
+ log_addr_space_size = ilog2(total_ranges_len);
+ if (log_addr_space_size <
+ (MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_min_addr_space)) ||
+ log_addr_space_size >
+ (MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_addr_space))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ MLX5_SET(page_track, obj_context, log_addr_space_size,
+ log_addr_space_size);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (err)
+ goto out;
+
+ tracker->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ kfree(in);
+ return err;
+}
+
+static int mlx5vf_cmd_destroy_tracker(struct mlx5_core_dev *mdev,
+ u32 tracker_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_PAGE_TRACK);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, tracker_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
static int alloc_cq_frag_buf(struct mlx5_core_dev *mdev,
struct mlx5_vhca_cq_buf *buf, int nent,
int cqe_size)
@@ -833,6 +975,7 @@ _mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev)
WARN_ON(mvdev->mdev_detach);
+ mlx5vf_cmd_destroy_tracker(mdev, tracker->id);
mlx5vf_destroy_qp(mdev, tracker->fw_qp);
mlx5vf_free_qp_recv_resources(mdev, tracker->host_qp);
mlx5vf_destroy_qp(mdev, tracker->host_qp);
@@ -941,6 +1084,10 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev,
tracker->host_qp = host_qp;
tracker->fw_qp = fw_qp;
+ err = mlx5vf_create_tracker(mdev, mvdev, ranges, nnodes);
+ if (err)
+ goto err_activate;
+
*page_size = host_qp->tracked_page_size;
mvdev->log_active = true;
mlx5vf_state_mutex_unlock(mvdev);
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index e71ec017bf04..658925ba5459 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -80,6 +80,7 @@ struct mlx5_vhca_qp {
};
struct mlx5_vhca_page_tracker {
+ u32 id;
u32 pdn;
struct mlx5_uars_page *uar;
struct mlx5_vhca_cq cq;
--
2.18.1