From: "Cédric Le Goater" <clg@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Eric Auger" <eric.auger@redhat.com>,
"Zhenzhong Duan" <zhenzhong.duan@intel.com>,
"Peter Maydell" <peter.maydell@linaro.org>,
"Richard Henderson" <richard.henderson@linaro.org>,
"Nicholas Piggin" <npiggin@gmail.com>,
"Harsh Prateek Bora" <harshpb@linux.ibm.com>,
"Thomas Huth" <thuth@redhat.com>,
"Eric Farman" <farman@linux.ibm.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Matthew Rosato" <mjrosato@linux.ibm.com>,
"Yi Liu" <yi.l.liu@intel.com>,
"Yi Sun" <yi.y.sun@linux.intel.com>,
"Cédric Le Goater" <clg@redhat.com>
Subject: [PULL 07/47] vfio/container: Switch to IOMMU BE set_dirty_page_tracking/query_dirty_bitmap API
Date: Tue, 19 Dec 2023 19:56:03 +0100 [thread overview]
Message-ID: <20231219185643.725448-8-clg@redhat.com> (raw)
In-Reply-To: <20231219185643.725448-1-clg@redhat.com>
From: Eric Auger <eric.auger@redhat.com>
dirty_pages_supported field is also moved to the base container
No functional change intended.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Yi Sun <yi.y.sun@linux.intel.com>
Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Signed-off-by: Cédric Le Goater <clg@redhat.com>
---
include/hw/vfio/vfio-common.h | 6 ------
include/hw/vfio/vfio-container-base.h | 6 ++++++
hw/vfio/common.c | 12 ++++++++----
hw/vfio/container-base.c | 16 ++++++++++++++++
hw/vfio/container.c | 21 ++++++++++++++-------
5 files changed, 44 insertions(+), 17 deletions(-)
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index bd4de6cb3aff7cb85e34960fe4238d82b30867cd..60f2785fe07cf71e3c65e85d933536c09170f4c6 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -83,7 +83,6 @@ typedef struct VFIOContainer {
unsigned iommu_type;
Error *error;
bool initialized;
- bool dirty_pages_supported;
uint64_t dirty_pgsizes;
uint64_t max_dirty_bitmap_size;
unsigned long pgsizes;
@@ -190,11 +189,6 @@ VFIOAddressSpace *vfio_get_address_space(AddressSpace *as);
void vfio_put_address_space(VFIOAddressSpace *space);
bool vfio_devices_all_running_and_saving(VFIOContainer *container);
-/* container->fd */
-int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start);
-int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
- hwaddr iova, hwaddr size);
-
/* SPAPR specific */
int vfio_container_add_section_window(VFIOContainer *container,
MemoryRegionSection *section,
diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h
index c7cc6ec9c55e93e4f7c6be5576ed59b5dea58b1e..f244f003d080a30e95b6daa97a132eeb5a271ae4 100644
--- a/include/hw/vfio/vfio-container-base.h
+++ b/include/hw/vfio/vfio-container-base.h
@@ -36,6 +36,7 @@ typedef struct VFIOAddressSpace {
typedef struct VFIOContainerBase {
const VFIOIOMMUOps *ops;
VFIOAddressSpace *space;
+ bool dirty_pages_supported;
QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
QLIST_ENTRY(VFIOContainerBase) next;
} VFIOContainerBase;
@@ -54,6 +55,11 @@ int vfio_container_dma_map(VFIOContainerBase *bcontainer,
int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
hwaddr iova, ram_addr_t size,
IOMMUTLBEntry *iotlb);
+int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+ bool start);
+int vfio_container_query_dirty_bitmap(VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size);
void vfio_container_init(VFIOContainerBase *bcontainer,
VFIOAddressSpace *space,
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 1d8202537ea4f27f49f2d477537d6c7c557bd61f..b1a875ca93a44347f3652298995b89b5d9aed045 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -1079,7 +1079,8 @@ static void vfio_listener_log_global_start(MemoryListener *listener)
if (vfio_devices_all_device_dirty_tracking(container)) {
ret = vfio_devices_dma_logging_start(container);
} else {
- ret = vfio_set_dirty_page_tracking(container, true);
+ ret = vfio_container_set_dirty_page_tracking(&container->bcontainer,
+ true);
}
if (ret) {
@@ -1097,7 +1098,8 @@ static void vfio_listener_log_global_stop(MemoryListener *listener)
if (vfio_devices_all_device_dirty_tracking(container)) {
vfio_devices_dma_logging_stop(container);
} else {
- ret = vfio_set_dirty_page_tracking(container, false);
+ ret = vfio_container_set_dirty_page_tracking(&container->bcontainer,
+ false);
}
if (ret) {
@@ -1165,7 +1167,8 @@ int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
VFIOBitmap vbmap;
int ret;
- if (!container->dirty_pages_supported && !all_device_dirty_tracking) {
+ if (!container->bcontainer.dirty_pages_supported &&
+ !all_device_dirty_tracking) {
cpu_physical_memory_set_dirty_range(ram_addr, size,
tcg_enabled() ? DIRTY_CLIENTS_ALL :
DIRTY_CLIENTS_NOCODE);
@@ -1180,7 +1183,8 @@ int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
if (all_device_dirty_tracking) {
ret = vfio_devices_query_dirty_bitmap(container, &vbmap, iova, size);
} else {
- ret = vfio_query_dirty_bitmap(container, &vbmap, iova, size);
+ ret = vfio_container_query_dirty_bitmap(&container->bcontainer, &vbmap,
+ iova, size);
}
if (ret) {
diff --git a/hw/vfio/container-base.c b/hw/vfio/container-base.c
index 3933391e0db26116401509c5917eaceabcd98443..5d654ae17233fe06c235d33d981eb8fa92128536 100644
--- a/hw/vfio/container-base.c
+++ b/hw/vfio/container-base.c
@@ -31,11 +31,27 @@ int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
return bcontainer->ops->dma_unmap(bcontainer, iova, size, iotlb);
}
+int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+ bool start)
+{
+ g_assert(bcontainer->ops->set_dirty_page_tracking);
+ return bcontainer->ops->set_dirty_page_tracking(bcontainer, start);
+}
+
+int vfio_container_query_dirty_bitmap(VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size)
+{
+ g_assert(bcontainer->ops->query_dirty_bitmap);
+ return bcontainer->ops->query_dirty_bitmap(bcontainer, vbmap, iova, size);
+}
+
void vfio_container_init(VFIOContainerBase *bcontainer, VFIOAddressSpace *space,
const VFIOIOMMUOps *ops)
{
bcontainer->ops = ops;
bcontainer->space = space;
+ bcontainer->dirty_pages_supported = false;
QLIST_INIT(&bcontainer->giommu_list);
}
diff --git a/hw/vfio/container.c b/hw/vfio/container.c
index f12fcb6fe11d578838b1052f70ee5099b9018e94..3ab74e26151efd7ae4027ed3b4cf04a835083ccc 100644
--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -131,7 +131,7 @@ static int vfio_legacy_dma_unmap(VFIOContainerBase *bcontainer, hwaddr iova,
if (iotlb && vfio_devices_all_running_and_mig_active(container)) {
if (!vfio_devices_all_device_dirty_tracking(container) &&
- container->dirty_pages_supported) {
+ container->bcontainer.dirty_pages_supported) {
return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
}
@@ -205,14 +205,17 @@ static int vfio_legacy_dma_map(VFIOContainerBase *bcontainer, hwaddr iova,
return -errno;
}
-int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
+static int vfio_legacy_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+ bool start)
{
+ VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+ bcontainer);
int ret;
struct vfio_iommu_type1_dirty_bitmap dirty = {
.argsz = sizeof(dirty),
};
- if (!container->dirty_pages_supported) {
+ if (!bcontainer->dirty_pages_supported) {
return 0;
}
@@ -232,9 +235,12 @@ int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
return ret;
}
-int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
- hwaddr iova, hwaddr size)
+static int vfio_legacy_query_dirty_bitmap(VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size)
{
+ VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+ bcontainer);
struct vfio_iommu_type1_dirty_bitmap *dbitmap;
struct vfio_iommu_type1_dirty_bitmap_get *range;
int ret;
@@ -461,7 +467,7 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container,
* qemu_real_host_page_size to mark those dirty.
*/
if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
- container->dirty_pages_supported = true;
+ container->bcontainer.dirty_pages_supported = true;
container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
container->dirty_pgsizes = cap_mig->pgsize_bitmap;
}
@@ -553,7 +559,6 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
container = g_malloc0(sizeof(*container));
container->fd = fd;
container->error = NULL;
- container->dirty_pages_supported = false;
container->dma_max_mappings = 0;
container->iova_ranges = NULL;
QLIST_INIT(&container->vrdl_list);
@@ -937,4 +942,6 @@ void vfio_detach_device(VFIODevice *vbasedev)
const VFIOIOMMUOps vfio_legacy_ops = {
.dma_map = vfio_legacy_dma_map,
.dma_unmap = vfio_legacy_dma_unmap,
+ .set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking,
+ .query_dirty_bitmap = vfio_legacy_query_dirty_bitmap,
};
--
2.43.0
next prev parent reply other threads:[~2023-12-19 18:58 UTC|newest]
Thread overview: 55+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-12-19 18:55 [PULL 00/47] vfio queue Cédric Le Goater
2023-12-19 18:55 ` [PULL 01/47] vfio: Introduce base object for VFIOContainer and targeted interface Cédric Le Goater
2023-12-19 18:55 ` [PULL 02/47] vfio/container: Introduce an empty VFIOIOMMUOps Cédric Le Goater
2023-12-19 18:55 ` [PULL 03/47] vfio/container: Switch to dma_map|unmap API Cédric Le Goater
2023-12-19 18:56 ` [PULL 04/47] vfio/common: Introduce vfio_container_init/destroy helper Cédric Le Goater
2023-12-19 18:56 ` [PULL 05/47] vfio/common: Move giommu_list in base container Cédric Le Goater
2023-12-19 18:56 ` [PULL 06/47] vfio/container: Move space field to " Cédric Le Goater
2023-12-19 18:56 ` Cédric Le Goater [this message]
2023-12-19 18:56 ` [PULL 08/47] vfio/container: Move per container device list in " Cédric Le Goater
2023-12-19 18:56 ` [PULL 09/47] vfio/container: Convert functions to " Cédric Le Goater
2023-12-19 18:56 ` [PULL 10/47] vfio/container: Move pgsizes and dma_max_mappings " Cédric Le Goater
2023-12-19 18:56 ` [PULL 11/47] vfio/container: Move vrdl_list " Cédric Le Goater
2023-12-19 18:56 ` [PULL 12/47] vfio/container: Move listener " Cédric Le Goater
2023-12-19 18:56 ` [PULL 13/47] vfio/container: Move dirty_pgsizes and max_dirty_bitmap_size " Cédric Le Goater
2023-12-19 18:56 ` [PULL 14/47] vfio/container: Move iova_ranges " Cédric Le Goater
2023-12-19 18:56 ` [PULL 15/47] vfio/container: Implement attach/detach_device Cédric Le Goater
2023-12-19 18:56 ` [PULL 16/47] vfio/spapr: Introduce spapr backend and target interface Cédric Le Goater
2023-12-19 18:56 ` [PULL 17/47] vfio/spapr: switch to spapr IOMMU BE add/del_section_window Cédric Le Goater
2023-12-19 18:56 ` [PULL 18/47] vfio/spapr: Move prereg_listener into spapr container Cédric Le Goater
2023-12-19 18:56 ` [PULL 19/47] vfio/spapr: Move hostwin_list " Cédric Le Goater
2023-12-19 18:56 ` [PULL 20/47] backends/iommufd: Introduce the iommufd object Cédric Le Goater
2023-12-21 16:00 ` Cédric Le Goater
2023-12-21 17:14 ` Eric Auger
2023-12-21 21:23 ` Cédric Le Goater
2023-12-22 10:09 ` Eric Auger
2023-12-22 10:34 ` Cédric Le Goater
2023-12-22 2:41 ` Duan, Zhenzhong
2023-12-19 18:56 ` [PULL 21/47] util/char_dev: Add open_cdev() Cédric Le Goater
2023-12-19 18:56 ` [PULL 22/47] vfio/common: return early if space isn't empty Cédric Le Goater
2023-12-19 18:56 ` [PULL 23/47] vfio/iommufd: Implement the iommufd backend Cédric Le Goater
2023-12-19 18:56 ` [PULL 24/47] vfio/iommufd: Relax assert check for " Cédric Le Goater
2023-12-19 18:56 ` [PULL 25/47] vfio/iommufd: Add support for iova_ranges and pgsizes Cédric Le Goater
2023-12-19 18:56 ` [PULL 26/47] vfio/pci: Extract out a helper vfio_pci_get_pci_hot_reset_info Cédric Le Goater
2023-12-19 18:56 ` [PULL 27/47] vfio/pci: Introduce a vfio pci hot reset interface Cédric Le Goater
2023-12-19 18:56 ` [PULL 28/47] vfio/iommufd: Enable pci hot reset through iommufd cdev interface Cédric Le Goater
2023-12-19 18:56 ` [PULL 29/47] vfio/pci: Allow the selection of a given iommu backend Cédric Le Goater
2023-12-19 18:56 ` [PULL 30/47] vfio/pci: Make vfio cdev pre-openable by passing a file handle Cédric Le Goater
2023-12-19 18:56 ` [PULL 31/47] vfio/platform: Allow the selection of a given iommu backend Cédric Le Goater
2023-12-19 18:56 ` [PULL 32/47] vfio/platform: Make vfio cdev pre-openable by passing a file handle Cédric Le Goater
2023-12-19 18:56 ` [PULL 33/47] vfio/ap: Allow the selection of a given iommu backend Cédric Le Goater
2023-12-19 18:56 ` [PULL 34/47] vfio/ap: Make vfio cdev pre-openable by passing a file handle Cédric Le Goater
2023-12-19 18:56 ` [PULL 35/47] vfio/ccw: Allow the selection of a given iommu backend Cédric Le Goater
2023-12-19 18:56 ` [PULL 36/47] vfio/ccw: Make vfio cdev pre-openable by passing a file handle Cédric Le Goater
2023-12-19 18:56 ` [PULL 37/47] vfio: Make VFIOContainerBase pointer parameter const in VFIOIOMMUOps callbacks Cédric Le Goater
2023-12-19 18:56 ` [PULL 38/47] hw/arm: Activate IOMMUFD for virt machines Cédric Le Goater
2023-12-19 18:56 ` [PULL 39/47] kconfig: Activate IOMMUFD for s390x machines Cédric Le Goater
2023-12-19 18:56 ` [PULL 40/47] hw/i386: Activate IOMMUFD for q35 machines Cédric Le Goater
2023-12-19 18:56 ` [PULL 41/47] vfio/pci: Move VFIODevice initializations in vfio_instance_init Cédric Le Goater
2023-12-19 18:56 ` [PULL 42/47] vfio/platform: Move VFIODevice initializations in vfio_platform_instance_init Cédric Le Goater
2023-12-19 18:56 ` [PULL 43/47] vfio/ap: Move VFIODevice initializations in vfio_ap_instance_init Cédric Le Goater
2023-12-19 18:56 ` [PULL 44/47] vfio/ccw: Move VFIODevice initializations in vfio_ccw_instance_init Cédric Le Goater
2023-12-19 18:56 ` [PULL 45/47] vfio: Introduce a helper function to initialize VFIODevice Cédric Le Goater
2023-12-19 18:56 ` [PULL 46/47] docs/devel: Add VFIO iommufd backend documentation Cédric Le Goater
2023-12-19 18:56 ` [PULL 47/47] hw/ppc/Kconfig: Imply VFIO_PCI Cédric Le Goater
2023-12-20 16:03 ` [PULL 00/47] vfio queue Stefan Hajnoczi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231219185643.725448-8-clg@redhat.com \
--to=clg@redhat.com \
--cc=alex.williamson@redhat.com \
--cc=eric.auger@redhat.com \
--cc=farman@linux.ibm.com \
--cc=harshpb@linux.ibm.com \
--cc=mjrosato@linux.ibm.com \
--cc=npiggin@gmail.com \
--cc=peter.maydell@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=richard.henderson@linaro.org \
--cc=thuth@redhat.com \
--cc=yi.l.liu@intel.com \
--cc=yi.y.sun@linux.intel.com \
--cc=zhenzhong.duan@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).