From: Zhenzhong Duan <zhenzhong.duan@intel.com>
To: qemu-devel@nongnu.org
Cc: alex.williamson@redhat.com, clg@redhat.com, jgg@nvidia.com,
nicolinc@nvidia.com, eric.auger@redhat.com, peterx@redhat.com,
jasonwang@redhat.com, kevin.tian@intel.com, yi.l.liu@intel.com,
yi.y.sun@intel.com, chao.p.peng@intel.com,
Zhenzhong Duan <zhenzhong.duan@intel.com>
Subject: [RFC PATCH v4 06/24] vfio/common: Add a vfio device iterator
Date: Wed, 12 Jul 2023 15:25:10 +0800
Message-ID: <20230712072528.275577-7-zhenzhong.duan@intel.com>
In-Reply-To: <20230712072528.275577-1-zhenzhong.duan@intel.com>
With a VFIO device iterator added, we can make some migration- and
reset-related functions group agnostic, e.g.:
vfio_mig_active
vfio_migratable_device_num
vfio_devices_all_dirty_tracking
vfio_devices_all_device_dirty_tracking
vfio_devices_all_running_and_mig_active
vfio_devices_dma_logging_stop
vfio_devices_dma_logging_start
vfio_devices_query_dirty_bitmap
vfio_reset_handler
Otherwise we would need to add container-specific callback variants of the
above functions just because they iterate devices based on groups.
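As an illustration, the iteration pattern these functions switch to (taken
from the hunks below, e.g. vfio_devices_all_device_dirty_tracking()) looks
roughly like:

    VFIODevice *vbasedev = NULL;

    /* Walk every device in the container without going through groups */
    while ((vbasedev = vfio_container_dev_iter_next(container, vbasedev))) {
        if (!vbasedev->dirty_pages_supported) {
            return false;
        }
    }
    return true;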
Also move the reset handler registration/unregistration to a place that is
not backend-specific, i.e. when the first VFIO address space is created
rather than when the first group is added.
Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
---
hw/vfio/common.c | 226 +++++++++++++++++++++++++----------------------
1 file changed, 121 insertions(+), 105 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index b9d493b211..f7e4dc0cef 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -84,6 +84,26 @@ static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
}
}
+static VFIODevice *vfio_container_dev_iter_next(VFIOContainer *container,
+ VFIODevice *curr)
+{
+ VFIOGroup *group;
+
+ if (!curr) {
+ group = QLIST_FIRST(&container->group_list);
+ } else {
+ if (curr->next.le_next) {
+ return curr->next.le_next;
+ }
+ group = curr->group->container_next.le_next;
+ }
+
+ if (!group) {
+ return NULL;
+ }
+ return QLIST_FIRST(&group->device_list);
+}
+
/*
* Device state interfaces
*/
@@ -112,17 +132,18 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
bool vfio_mig_active(void)
{
- VFIOGroup *group;
+ VFIOAddressSpace *space;
+ VFIOContainer *container;
VFIODevice *vbasedev;
- if (QLIST_EMPTY(&vfio_group_list)) {
- return false;
- }
-
- QLIST_FOREACH(group, &vfio_group_list, next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (vbasedev->migration_blocker) {
- return false;
+ QLIST_FOREACH(space, &vfio_address_spaces, list) {
+ QLIST_FOREACH(container, &space->containers, next) {
+ vbasedev = NULL;
+ while ((vbasedev = vfio_container_dev_iter_next(container,
+ vbasedev))) {
+ if (vbasedev->migration_blocker) {
+ return false;
+ }
}
}
}
@@ -133,14 +154,19 @@ static Error *multiple_devices_migration_blocker;
static unsigned int vfio_migratable_device_num(void)
{
- VFIOGroup *group;
+ VFIOAddressSpace *space;
+ VFIOContainer *container;
VFIODevice *vbasedev;
unsigned int device_num = 0;
- QLIST_FOREACH(group, &vfio_group_list, next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (vbasedev->migration) {
- device_num++;
+ QLIST_FOREACH(space, &vfio_address_spaces, list) {
+ QLIST_FOREACH(container, &space->containers, next) {
+ vbasedev = NULL;
+ while ((vbasedev = vfio_container_dev_iter_next(container,
+ vbasedev))) {
+ if (vbasedev->migration) {
+ device_num++;
+ }
}
}
}
@@ -207,8 +233,7 @@ static void vfio_set_migration_error(int err)
static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
{
- VFIOGroup *group;
- VFIODevice *vbasedev;
+ VFIODevice *vbasedev = NULL;
MigrationState *ms = migrate_get_current();
if (ms->state != MIGRATION_STATUS_ACTIVE &&
@@ -216,19 +241,17 @@ static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
return false;
}
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- VFIOMigration *migration = vbasedev->migration;
+ while ((vbasedev = vfio_container_dev_iter_next(container, vbasedev))) {
+ VFIOMigration *migration = vbasedev->migration;
- if (!migration) {
- return false;
- }
+ if (!migration) {
+ return false;
+ }
- if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
- (migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
- migration->device_state == VFIO_DEVICE_STATE_PRE_COPY)) {
- return false;
- }
+ if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
+ (migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
+ migration->device_state == VFIO_DEVICE_STATE_PRE_COPY)) {
+ return false;
}
}
return true;
@@ -236,14 +259,11 @@ static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
static bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container)
{
- VFIOGroup *group;
- VFIODevice *vbasedev;
+ VFIODevice *vbasedev = NULL;
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (!vbasedev->dirty_pages_supported) {
- return false;
- }
+ while ((vbasedev = vfio_container_dev_iter_next(container, vbasedev))) {
+ if (!vbasedev->dirty_pages_supported) {
+ return false;
}
}
@@ -256,27 +276,24 @@ static bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container)
*/
static bool vfio_devices_all_running_and_mig_active(VFIOContainer *container)
{
- VFIOGroup *group;
- VFIODevice *vbasedev;
+ VFIODevice *vbasedev = NULL;
if (!migration_is_active(migrate_get_current())) {
return false;
}
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- VFIOMigration *migration = vbasedev->migration;
+ while ((vbasedev = vfio_container_dev_iter_next(container, vbasedev))) {
+ VFIOMigration *migration = vbasedev->migration;
- if (!migration) {
- return false;
- }
+ if (!migration) {
+ return false;
+ }
- if (migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
- migration->device_state == VFIO_DEVICE_STATE_PRE_COPY) {
- continue;
- } else {
- return false;
- }
+ if (migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
+ migration->device_state == VFIO_DEVICE_STATE_PRE_COPY) {
+ continue;
+ } else {
+ return false;
}
}
return true;
@@ -1243,25 +1260,22 @@ static void vfio_devices_dma_logging_stop(VFIOContainer *container)
uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature),
sizeof(uint64_t))] = {};
struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
- VFIODevice *vbasedev;
- VFIOGroup *group;
+ VFIODevice *vbasedev = NULL;
feature->argsz = sizeof(buf);
feature->flags = VFIO_DEVICE_FEATURE_SET |
VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP;
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (!vbasedev->dirty_tracking) {
- continue;
- }
+ while ((vbasedev = vfio_container_dev_iter_next(container, vbasedev))) {
+ if (!vbasedev->dirty_tracking) {
+ continue;
+ }
- if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
- warn_report("%s: Failed to stop DMA logging, err %d (%s)",
- vbasedev->name, -errno, strerror(errno));
- }
- vbasedev->dirty_tracking = false;
+ if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
+ warn_report("%s: Failed to stop DMA logging, err %d (%s)",
+ vbasedev->name, -errno, strerror(errno));
}
+ vbasedev->dirty_tracking = false;
}
}
@@ -1336,8 +1350,7 @@ static int vfio_devices_dma_logging_start(VFIOContainer *container)
{
struct vfio_device_feature *feature;
VFIODirtyRanges ranges;
- VFIODevice *vbasedev;
- VFIOGroup *group;
+ VFIODevice *vbasedev = NULL;
int ret = 0;
vfio_dirty_tracking_init(container, &ranges);
@@ -1347,21 +1360,19 @@ static int vfio_devices_dma_logging_start(VFIOContainer *container)
return -errno;
}
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (vbasedev->dirty_tracking) {
- continue;
- }
+ while ((vbasedev = vfio_container_dev_iter_next(container, vbasedev))) {
+ if (vbasedev->dirty_tracking) {
+ continue;
+ }
- ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
- if (ret) {
- ret = -errno;
- error_report("%s: Failed to start DMA logging, err %d (%s)",
- vbasedev->name, ret, strerror(errno));
- goto out;
- }
- vbasedev->dirty_tracking = true;
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
+ if (ret) {
+ ret = -errno;
+ error_report("%s: Failed to start DMA logging, err %d (%s)",
+ vbasedev->name, ret, strerror(errno));
+ goto out;
}
+ vbasedev->dirty_tracking = true;
}
out:
@@ -1440,22 +1451,19 @@ static int vfio_devices_query_dirty_bitmap(VFIOContainer *container,
VFIOBitmap *vbmap, hwaddr iova,
hwaddr size)
{
- VFIODevice *vbasedev;
- VFIOGroup *group;
+ VFIODevice *vbasedev = NULL;
int ret;
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- ret = vfio_device_dma_logging_report(vbasedev, iova, size,
- vbmap->bitmap);
- if (ret) {
- error_report("%s: Failed to get DMA logging report, iova: "
- "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx
- ", err: %d (%s)",
- vbasedev->name, iova, size, ret, strerror(-ret));
+ while ((vbasedev = vfio_container_dev_iter_next(container, vbasedev))) {
+ ret = vfio_device_dma_logging_report(vbasedev, iova, size,
+ vbmap->bitmap);
+ if (ret) {
+ error_report("%s: Failed to get DMA logging report, iova: "
+ "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx
+ ", err: %d (%s)",
+ vbasedev->name, iova, size, ret, strerror(-ret));
- return ret;
- }
+ return ret;
}
}
@@ -1739,21 +1747,30 @@ bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
void vfio_reset_handler(void *opaque)
{
- VFIOGroup *group;
+ VFIOAddressSpace *space;
+ VFIOContainer *container;
VFIODevice *vbasedev;
- QLIST_FOREACH(group, &vfio_group_list, next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (vbasedev->dev->realized) {
- vbasedev->ops->vfio_compute_needs_reset(vbasedev);
+ QLIST_FOREACH(space, &vfio_address_spaces, list) {
+ QLIST_FOREACH(container, &space->containers, next) {
+ vbasedev = NULL;
+ while ((vbasedev = vfio_container_dev_iter_next(container,
+ vbasedev))) {
+ if (vbasedev->dev->realized) {
+ vbasedev->ops->vfio_compute_needs_reset(vbasedev);
+ }
}
}
}
- QLIST_FOREACH(group, &vfio_group_list, next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (vbasedev->dev->realized && vbasedev->needs_reset) {
- vbasedev->ops->vfio_hot_reset_multi(vbasedev);
+ QLIST_FOREACH(space, &vfio_address_spaces, list) {
+ QLIST_FOREACH(container, &space->containers, next) {
+ vbasedev = NULL;
+ while ((vbasedev = vfio_container_dev_iter_next(container,
+ vbasedev))) {
+ if (vbasedev->dev->realized && vbasedev->needs_reset) {
+ vbasedev->ops->vfio_hot_reset_multi(vbasedev);
+ }
}
}
}
@@ -1847,6 +1864,10 @@ static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
space->as = as;
QLIST_INIT(&space->containers);
+ if (QLIST_EMPTY(&vfio_address_spaces)) {
+ qemu_register_reset(vfio_reset_handler, NULL);
+ }
+
QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
return space;
@@ -1858,6 +1879,9 @@ static void vfio_put_address_space(VFIOAddressSpace *space)
QLIST_REMOVE(space, list);
g_free(space);
}
+ if (QLIST_EMPTY(&vfio_address_spaces)) {
+ qemu_unregister_reset(vfio_reset_handler, NULL);
+ }
}
/*
@@ -2323,10 +2347,6 @@ VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
goto close_fd_exit;
}
- if (QLIST_EMPTY(&vfio_group_list)) {
- qemu_register_reset(vfio_reset_handler, NULL);
- }
-
QLIST_INSERT_HEAD(&vfio_group_list, group, next);
return group;
@@ -2355,10 +2375,6 @@ void vfio_put_group(VFIOGroup *group)
trace_vfio_put_group(group->fd);
close(group->fd);
g_free(group);
-
- if (QLIST_EMPTY(&vfio_group_list)) {
- qemu_unregister_reset(vfio_reset_handler, NULL);
- }
}
struct vfio_device_info *vfio_get_device_info(int fd)
--
2.34.1
Thread overview: 38+ messages
2023-07-12 7:25 [RFC PATCH v4 00/24] vfio: Adopt iommufd Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 01/24] scripts/update-linux-headers: Add iommufd.h Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 02/24] Update linux-header per VFIO device cdev v14 Zhenzhong Duan
2023-07-12 7:49 ` Cornelia Huck
2023-07-12 9:28 ` Duan, Zhenzhong
2023-07-12 9:35 ` Cornelia Huck
2023-07-12 9:50 ` Duan, Zhenzhong
2023-07-12 7:25 ` [RFC PATCH v4 03/24] vfio/common: Move IOMMU agnostic helpers to a separate file Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 04/24] vfio/common: Introduce vfio_container_add|del_section_window() Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 05/24] vfio/common: Extract out vfio_kvm_device_[add/del]_fd Zhenzhong Duan
2023-07-12 7:25 ` Zhenzhong Duan [this message]
2023-07-12 7:25 ` [RFC PATCH v4 07/24] vfio/common: Refactor vfio_viommu_preset() to be group agnostic Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 08/24] vfio/common: Move legacy VFIO backend code into separate container.c Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 09/24] vfio/common: Rename into as.c Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 10/24] vfio: Add base container Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 11/24] vfio/container: Introduce vfio_[attach/detach]_device Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 12/24] vfio/platform: Use vfio_[attach/detach]_device Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 13/24] vfio/ap: " Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 14/24] vfio/ccw: " Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 15/24] vfio/container-base: Introduce [attach/detach]_device container callbacks Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 16/24] vfio/as: Simplify vfio_viommu_preset() Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 17/24] Add iommufd configure option Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 18/24] backends/iommufd: Introduce the iommufd object Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 19/24] util/char_dev: Add open_cdev() Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 20/24] vfio/iommufd: Implement the iommufd backend Zhenzhong Duan
[not found] ` <CGME20230714092325eucas1p105978aa928776b6e008e08e793d0ac60@eucas1p1.samsung.com>
2023-07-14 9:23 ` Joel Granados
2023-07-14 9:35 ` Duan, Zhenzhong
2023-07-12 7:25 ` [RFC PATCH v4 21/24] vfio/as: Add vfio device iterator callback for iommufd Zhenzhong Duan
2023-08-17 5:48 ` Nicolin Chen
2023-08-17 6:05 ` Duan, Zhenzhong
2023-07-12 7:25 ` [RFC PATCH v4 22/24] vfio/pci: Adapt vfio pci hot reset support with iommufd BE Zhenzhong Duan
2023-08-17 5:25 ` Nicolin Chen
2023-08-17 6:11 ` Duan, Zhenzhong
2023-07-12 7:25 ` [RFC PATCH v4 23/24] vfio/as: Allow the selection of a given iommu backend Zhenzhong Duan
2023-07-12 7:25 ` [RFC PATCH v4 24/24] vfio/iommufd: Make vfio cdev pre-openable by passing a file handle Zhenzhong Duan
2023-08-01 8:28 ` [RFC PATCH v4 00/24] vfio: Adopt iommufd Duan, Zhenzhong
2023-08-02 22:44 ` Nicolin Chen
2023-08-03 1:31 ` Duan, Zhenzhong