From: Alex Williamson <alex.williamson@redhat.com>
To: qemu-devel@nongnu.org
Cc: alex.williamson@redhat.com, avihaih@nvidia.com, clg@redhat.com,
joao.m.martins@oracle.com
Subject: [PULL 12/17] vfio/common: Add device dirty page bitmap sync
Date: Tue, 07 Mar 2023 11:53:27 -0700
Message-ID: <167821520784.619792.11371797274291565.stgit@omen>
In-Reply-To: <167821508699.619792.1719671327865445814.stgit@omen>
From: Joao Martins <joao.m.martins@oracle.com>
Add device dirty page bitmap sync functionality. This uses the device
DMA logging uAPI to sync the dirty page bitmap from the device.
Device dirty page bitmap sync is used only if all devices within a
container support device dirty page tracking.
Signed-off-by: Avihai Horon <avihaih@nvidia.com>
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Link: https://lore.kernel.org/r/20230307125450.62409-13-joao.m.martins@oracle.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
hw/vfio/common.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 79 insertions(+), 9 deletions(-)
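
For context, the per-device sync added below boils down to a single
VFIO_DEVICE_FEATURE ioctl carrying a vfio_device_feature_dma_logging_report
payload. Here is a minimal standalone sketch of that call, mirroring
vfio_device_dma_logging_report() from the diff; it assumes a kernel that
provides the DMA logging uAPI (linux/vfio.h, v6.0 or newer), an already-open
VFIO device fd, and a caller-allocated bitmap with one bit per host page in
[iova, iova + size). The function name dma_logging_report() is ours for
illustration, not QEMU's:

#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

static int dma_logging_report(int device_fd, uint64_t iova, uint64_t size,
                              void *bitmap)
{
    /*
     * The feature header and its trailing report payload must be one
     * contiguous buffer; size it in whole uint64_t words so the payload
     * stays 8-byte aligned, as the QEMU code does with DIV_ROUND_UP().
     */
    uint64_t buf[(sizeof(struct vfio_device_feature) +
                  sizeof(struct vfio_device_feature_dma_logging_report) +
                  sizeof(uint64_t) - 1) / sizeof(uint64_t)] = {0};
    struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
    struct vfio_device_feature_dma_logging_report *report =
        (struct vfio_device_feature_dma_logging_report *)feature->data;

    report->iova = iova;
    report->length = size;
    /* QEMU uses qemu_real_host_page_size(); plain sysconf() stands in here. */
    report->page_size = sysconf(_SC_PAGESIZE);
    report->bitmap = (uintptr_t)bitmap;

    feature->argsz = sizeof(buf);
    feature->flags = VFIO_DEVICE_FEATURE_GET |
                     VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;

    return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature) ? -errno : 0;
}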
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index ae81af6d3269..334c62585829 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -339,6 +339,9 @@ static int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size)
return 0;
}
+static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
+ uint64_t size, ram_addr_t ram_addr);
+
bool vfio_mig_active(void)
{
VFIOGroup *group;
@@ -562,10 +565,16 @@ static int vfio_dma_unmap(VFIOContainer *container,
.iova = iova,
.size = size,
};
+ bool need_dirty_sync = false;
+ int ret;
+
+ if (iotlb && vfio_devices_all_running_and_mig_active(container)) {
+ if (!vfio_devices_all_device_dirty_tracking(container) &&
+ container->dirty_pages_supported) {
+ return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
+ }
- if (iotlb && container->dirty_pages_supported &&
- vfio_devices_all_running_and_mig_active(container)) {
- return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
+ need_dirty_sync = true;
}
while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
@@ -591,10 +600,12 @@ static int vfio_dma_unmap(VFIOContainer *container,
return -errno;
}
- if (iotlb && vfio_devices_all_running_and_mig_active(container)) {
- cpu_physical_memory_set_dirty_range(iotlb->translated_addr, size,
- tcg_enabled() ? DIRTY_CLIENTS_ALL :
- DIRTY_CLIENTS_NOCODE);
+ if (need_dirty_sync) {
+ ret = vfio_get_dirty_bitmap(container, iova, size,
+ iotlb->translated_addr);
+ if (ret) {
+ return ret;
+ }
}
return 0;
@@ -1595,6 +1606,58 @@ static void vfio_listener_log_global_stop(MemoryListener *listener)
}
}
+static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova,
+ hwaddr size, void *bitmap)
+{
+ uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
+ sizeof(struct vfio_device_feature_dma_logging_report),
+ sizeof(__u64))] = {};
+ struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
+ struct vfio_device_feature_dma_logging_report *report =
+ (struct vfio_device_feature_dma_logging_report *)feature->data;
+
+ report->iova = iova;
+ report->length = size;
+ report->page_size = qemu_real_host_page_size();
+ report->bitmap = (__u64)(uintptr_t)bitmap;
+
+ feature->argsz = sizeof(buf);
+ feature->flags = VFIO_DEVICE_FEATURE_GET |
+ VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
+
+ if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
+ return -errno;
+ }
+
+ return 0;
+}
+
+static int vfio_devices_query_dirty_bitmap(VFIOContainer *container,
+ VFIOBitmap *vbmap, hwaddr iova,
+ hwaddr size)
+{
+ VFIODevice *vbasedev;
+ VFIOGroup *group;
+ int ret;
+
+ QLIST_FOREACH(group, &container->group_list, container_next) {
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ ret = vfio_device_dma_logging_report(vbasedev, iova, size,
+ vbmap->bitmap);
+ if (ret) {
+ error_report("%s: Failed to get DMA logging report, iova: "
+ "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx
+ ", err: %d (%s)",
+ vbasedev->name, iova, size, ret, strerror(-ret));
+
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
hwaddr iova, hwaddr size)
{
@@ -1635,10 +1698,12 @@ static int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
uint64_t size, ram_addr_t ram_addr)
{
+ bool all_device_dirty_tracking =
+ vfio_devices_all_device_dirty_tracking(container);
VFIOBitmap vbmap;
int ret;
- if (!container->dirty_pages_supported) {
+ if (!container->dirty_pages_supported && !all_device_dirty_tracking) {
cpu_physical_memory_set_dirty_range(ram_addr, size,
tcg_enabled() ? DIRTY_CLIENTS_ALL :
DIRTY_CLIENTS_NOCODE);
@@ -1650,7 +1715,12 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
return ret;
}
- ret = vfio_query_dirty_bitmap(container, &vbmap, iova, size);
+ if (all_device_dirty_tracking) {
+ ret = vfio_devices_query_dirty_bitmap(container, &vbmap, iova, size);
+ } else {
+ ret = vfio_query_dirty_bitmap(container, &vbmap, iova, size);
+ }
+
if (ret) {
goto out;
}