From: "Cédric Le Goater" <clg@redhat.com>
To: Eric Auger <eric.auger@redhat.com>,
eric.auger.pro@gmail.com, qemu-devel@nongnu.org,
zhenzhong.duan@intel.com, alex.williamson@redhat.com,
jgg@nvidia.com, nicolinc@nvidia.com, joao.m.martins@oracle.com,
peterx@redhat.com, kevin.tian@intel.com, yi.l.liu@intel.com,
yi.y.sun@intel.com, chao.p.peng@intel.com,
mjrosato@linux.ibm.com
Subject: Re: [PATCH v3 12/15] vfio/common: Introduce a per container device list
Date: Tue, 3 Oct 2023 17:52:17 +0200
Message-ID: <cd70f534-2b08-3e56-547f-acd182c8e684@redhat.com>
In-Reply-To: <20231003101530.288864-13-eric.auger@redhat.com>

On 10/3/23 12:14, Eric Auger wrote:
> From: Zhenzhong Duan <zhenzhong.duan@intel.com>
>
> Several functions need to iterate over the VFIO devices attached to
> a given container. This is currently achieved by iterating over the
> groups attached to the container and then over the devices in the group.
> Let's introduce a per container device list that simplifies this
> search.
>
> The per-container list is used in the following functions:
> vfio_devices_all_dirty_tracking
> vfio_devices_all_device_dirty_tracking
> vfio_devices_all_running_and_mig_active
> vfio_devices_dma_logging_stop
> vfio_devices_dma_logging_start
> vfio_devices_query_dirty_bitmap
>
> This will also ease the migration to IOMMUFD by hiding the group
> specifics.
>
> Suggested-by: Alex Williamson <alex.williamson@redhat.com>
> Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
LGTM,

Reviewed-by: Cédric Le Goater <clg@redhat.com>

Thanks,

C.
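
As an aside for anyone skimming the thread, the simplification this patch
buys boils down to the iteration change below. These are fragments lifted
from the hunks that follow, not standalone code:

    /* before: nested walk, group list then each group's device list */
    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            /* per-device work */
        }
    }

    /* after: single walk over the new per-container device list */
    QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
        /* per-device work */
    }
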
> ---
> include/hw/vfio/vfio-common.h | 2 +
> hw/vfio/common.c | 145 +++++++++++++++-------------------
> 2 files changed, 67 insertions(+), 80 deletions(-)
>
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index c486bdef2a..8ca70dd821 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -98,6 +98,7 @@ typedef struct VFIOContainer {
> QLIST_HEAD(, VFIOGroup) group_list;
> QLIST_HEAD(, VFIORamDiscardListener) vrdl_list;
> QLIST_ENTRY(VFIOContainer) next;
> + QLIST_HEAD(, VFIODevice) device_list;
> } VFIOContainer;
>
> typedef struct VFIOGuestIOMMU {
> @@ -129,6 +130,7 @@ typedef struct VFIODeviceOps VFIODeviceOps;
>
> typedef struct VFIODevice {
> QLIST_ENTRY(VFIODevice) next;
> + QLIST_ENTRY(VFIODevice) container_next;
> struct VFIOGroup *group;
> char *sysfsdev;
> char *name;
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index 019da387d2..ef9dc7c747 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -218,7 +218,6 @@ bool vfio_device_state_is_precopy(VFIODevice *vbasedev)
>
> static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
> {
> - VFIOGroup *group;
> VFIODevice *vbasedev;
> MigrationState *ms = migrate_get_current();
>
> @@ -227,19 +226,17 @@ static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
> return false;
> }
>
> - QLIST_FOREACH(group, &container->group_list, container_next) {
> - QLIST_FOREACH(vbasedev, &group->device_list, next) {
> - VFIOMigration *migration = vbasedev->migration;
> + QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
> + VFIOMigration *migration = vbasedev->migration;
>
> - if (!migration) {
> - return false;
> - }
> + if (!migration) {
> + return false;
> + }
>
> - if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
> - (vfio_device_state_is_running(vbasedev) ||
> - vfio_device_state_is_precopy(vbasedev))) {
> - return false;
> - }
> + if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
> + (vfio_device_state_is_running(vbasedev) ||
> + vfio_device_state_is_precopy(vbasedev))) {
> + return false;
> }
> }
> return true;
> @@ -247,14 +244,11 @@ static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
>
> static bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container)
> {
> - VFIOGroup *group;
> VFIODevice *vbasedev;
>
> - QLIST_FOREACH(group, &container->group_list, container_next) {
> - QLIST_FOREACH(vbasedev, &group->device_list, next) {
> - if (!vbasedev->dirty_pages_supported) {
> - return false;
> - }
> + QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
> + if (!vbasedev->dirty_pages_supported) {
> + return false;
> }
> }
>
> @@ -267,27 +261,24 @@ static bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container)
> */
> static bool vfio_devices_all_running_and_mig_active(VFIOContainer *container)
> {
> - VFIOGroup *group;
> VFIODevice *vbasedev;
>
> if (!migration_is_active(migrate_get_current())) {
> return false;
> }
>
> - QLIST_FOREACH(group, &container->group_list, container_next) {
> - QLIST_FOREACH(vbasedev, &group->device_list, next) {
> - VFIOMigration *migration = vbasedev->migration;
> + QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
> + VFIOMigration *migration = vbasedev->migration;
>
> - if (!migration) {
> - return false;
> - }
> + if (!migration) {
> + return false;
> + }
>
> - if (vfio_device_state_is_running(vbasedev) ||
> - vfio_device_state_is_precopy(vbasedev)) {
> - continue;
> - } else {
> - return false;
> - }
> + if (vfio_device_state_is_running(vbasedev) ||
> + vfio_device_state_is_precopy(vbasedev)) {
> + continue;
> + } else {
> + return false;
> }
> }
> return true;
> @@ -1187,20 +1178,17 @@ static bool vfio_section_is_vfio_pci(MemoryRegionSection *section,
> {
> VFIOPCIDevice *pcidev;
> VFIODevice *vbasedev;
> - VFIOGroup *group;
> Object *owner;
>
> owner = memory_region_owner(section->mr);
>
> - QLIST_FOREACH(group, &container->group_list, container_next) {
> - QLIST_FOREACH(vbasedev, &group->device_list, next) {
> - if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
> - continue;
> - }
> - pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
> - if (OBJECT(pcidev) == owner) {
> - return true;
> - }
> + QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
> + if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
> + continue;
> + }
> + pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
> + if (OBJECT(pcidev) == owner) {
> + return true;
> }
> }
>
> @@ -1296,24 +1284,21 @@ static void vfio_devices_dma_logging_stop(VFIOContainer *container)
> sizeof(uint64_t))] = {};
> struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
> VFIODevice *vbasedev;
> - VFIOGroup *group;
>
> feature->argsz = sizeof(buf);
> feature->flags = VFIO_DEVICE_FEATURE_SET |
> VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP;
>
> - QLIST_FOREACH(group, &container->group_list, container_next) {
> - QLIST_FOREACH(vbasedev, &group->device_list, next) {
> - if (!vbasedev->dirty_tracking) {
> - continue;
> - }
> -
> - if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
> - warn_report("%s: Failed to stop DMA logging, err %d (%s)",
> - vbasedev->name, -errno, strerror(errno));
> - }
> - vbasedev->dirty_tracking = false;
> + QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
> + if (!vbasedev->dirty_tracking) {
> + continue;
> }
> +
> + if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
> + warn_report("%s: Failed to stop DMA logging, err %d (%s)",
> + vbasedev->name, -errno, strerror(errno));
> + }
> + vbasedev->dirty_tracking = false;
> }
> }
>
> @@ -1396,7 +1381,6 @@ static int vfio_devices_dma_logging_start(VFIOContainer *container)
> struct vfio_device_feature *feature;
> VFIODirtyRanges ranges;
> VFIODevice *vbasedev;
> - VFIOGroup *group;
> int ret = 0;
>
> vfio_dirty_tracking_init(container, &ranges);
> @@ -1406,21 +1390,19 @@ static int vfio_devices_dma_logging_start(VFIOContainer *container)
> return -errno;
> }
>
> - QLIST_FOREACH(group, &container->group_list, container_next) {
> - QLIST_FOREACH(vbasedev, &group->device_list, next) {
> - if (vbasedev->dirty_tracking) {
> - continue;
> - }
> -
> - ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
> - if (ret) {
> - ret = -errno;
> - error_report("%s: Failed to start DMA logging, err %d (%s)",
> - vbasedev->name, ret, strerror(errno));
> - goto out;
> - }
> - vbasedev->dirty_tracking = true;
> + QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
> + if (vbasedev->dirty_tracking) {
> + continue;
> }
> +
> + ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
> + if (ret) {
> + ret = -errno;
> + error_report("%s: Failed to start DMA logging, err %d (%s)",
> + vbasedev->name, ret, strerror(errno));
> + goto out;
> + }
> + vbasedev->dirty_tracking = true;
> }
>
> out:
> @@ -1500,21 +1482,18 @@ static int vfio_devices_query_dirty_bitmap(VFIOContainer *container,
> hwaddr size)
> {
> VFIODevice *vbasedev;
> - VFIOGroup *group;
> int ret;
>
> - QLIST_FOREACH(group, &container->group_list, container_next) {
> - QLIST_FOREACH(vbasedev, &group->device_list, next) {
> - ret = vfio_device_dma_logging_report(vbasedev, iova, size,
> - vbmap->bitmap);
> - if (ret) {
> - error_report("%s: Failed to get DMA logging report, iova: "
> - "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx
> - ", err: %d (%s)",
> - vbasedev->name, iova, size, ret, strerror(-ret));
> + QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
> + ret = vfio_device_dma_logging_report(vbasedev, iova, size,
> + vbmap->bitmap);
> + if (ret) {
> + error_report("%s: Failed to get DMA logging report, iova: "
> + "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx
> + ", err: %d (%s)",
> + vbasedev->name, iova, size, ret, strerror(-ret));
>
> - return ret;
> - }
> + return ret;
> }
> }
>
> @@ -2648,6 +2627,7 @@ int vfio_attach_device(char *name, VFIODevice *vbasedev,
> int groupid = vfio_device_groupid(vbasedev, errp);
> VFIODevice *vbasedev_iter;
> VFIOGroup *group;
> + VFIOContainer *container;
> int ret;
>
> if (groupid < 0) {
> @@ -2671,8 +2651,12 @@ int vfio_attach_device(char *name, VFIODevice *vbasedev,
> ret = vfio_get_device(group, name, vbasedev, errp);
> if (ret) {
> vfio_put_group(group);
> + return ret;
> }
>
> + container = group->container;
> + QLIST_INSERT_HEAD(&container->device_list, vbasedev, container_next);
> +
> return ret;
> }
>
> @@ -2680,6 +2664,7 @@ void vfio_detach_device(VFIODevice *vbasedev)
> {
> VFIOGroup *group = vbasedev->group;
>
> + QLIST_REMOVE(vbasedev, container_next);
> trace_vfio_detach_device(vbasedev->name, group->groupid);
> vfio_put_base_device(vbasedev);
> vfio_put_group(group);
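
For completeness: with this patch a VFIODevice sits on two lists at once,
its group's device_list (linked through the existing 'next' field) and the
container's new device_list (linked through 'container_next'). The extra
bookkeeping in the attach/detach paths, mirroring the hunks above with the
group handling elided, is just:

    /* vfio_attach_device(): after vfio_get_device() succeeds */
    container = group->container;
    QLIST_INSERT_HEAD(&container->device_list, vbasedev, container_next);

    /* vfio_detach_device(): unlink before tearing down device and group */
    QLIST_REMOVE(vbasedev, container_next);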