From: Zhenzhong Duan <zhenzhong.duan@intel.com>
To: qemu-devel@nongnu.org
Cc: alex.williamson@redhat.com, clg@redhat.com, jgg@nvidia.com,
	nicolinc@nvidia.com, joao.m.martins@oracle.com,
	eric.auger@redhat.com, peterx@redhat.com, jasowang@redhat.com,
	kevin.tian@intel.com, yi.l.liu@intel.com, yi.y.sun@intel.com,
	chao.p.peng@intel.com, Yi Sun <yi.y.sun@linux.intel.com>,
	Zhenzhong Duan <zhenzhong.duan@intel.com>,
	Nicholas Piggin <npiggin@gmail.com>,
	Daniel Henrique Barboza <danielhb413@gmail.com>,
	David Gibson <david@gibson.dropbear.id.au>,
	Harsh Prateek Bora <harshpb@linux.ibm.com>,
	qemu-ppc@nongnu.org (open list:sPAPR (pseries))
Subject: [PATCH v3 14/37] vfio/container: Move vrdl_list, pgsizes and dma_max_mappings to base container
Date: Thu, 26 Oct 2023 18:30:41 +0800
Message-ID: <20231026103104.1686921-15-zhenzhong.duan@intel.com>
In-Reply-To: <20231026103104.1686921-1-zhenzhong.duan@intel.com>

From: Eric Auger <eric.auger@redhat.com>

Move vrdl_list, pgsizes and dma_max_mappings to the base
container object.

No functional change intended.
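
Purely for illustration (this helper is hypothetical and not part of the
patch): common code now reads these fields straight from
VFIOContainerBase, while legacy-only paths derive the base pointer from
the embedded member, e.g.:

    /*
     * Hypothetical sketch, not part of this patch: pgsizes,
     * dma_max_mappings and vrdl_list now live in VFIOContainerBase,
     * so legacy VFIOContainer code goes through the embedded base
     * object (cf. the vfio_legacy_dma_unmap() hunk below).
     */
    static uint64_t smallest_supported_pagesize(VFIOContainer *container)
    {
        VFIOContainerBase *bcontainer = &container->bcontainer;

        return 1ULL << ctz64(bcontainer->pgsizes);
    }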

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Yi Sun <yi.y.sun@linux.intel.com>
Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
[ clg: context changes ]
Signed-off-by: Cédric Le Goater <clg@redhat.com>
---
 include/hw/vfio/vfio-common.h         | 13 -------
 include/hw/vfio/vfio-container-base.h | 13 +++++++
 hw/vfio/common.c                      | 49 ++++++++++++++-------------
 hw/vfio/container-base.c              | 12 +++++++
 hw/vfio/container.c                   | 12 +++----
 hw/vfio/spapr.c                       | 18 +++++++---
 6 files changed, 68 insertions(+), 49 deletions(-)

diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index fb3c7aea8f..65ae2d76cf 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -85,24 +85,11 @@ typedef struct VFIOContainer {
     bool initialized;
     uint64_t dirty_pgsizes;
     uint64_t max_dirty_bitmap_size;
-    unsigned long pgsizes;
-    unsigned int dma_max_mappings;
     QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
     QLIST_HEAD(, VFIOGroup) group_list;
-    QLIST_HEAD(, VFIORamDiscardListener) vrdl_list;
     GList *iova_ranges;
 } VFIOContainer;
 
-typedef struct VFIORamDiscardListener {
-    VFIOContainer *container;
-    MemoryRegion *mr;
-    hwaddr offset_within_address_space;
-    hwaddr size;
-    uint64_t granularity;
-    RamDiscardListener listener;
-    QLIST_ENTRY(VFIORamDiscardListener) next;
-} VFIORamDiscardListener;
-
 typedef struct VFIOHostDMAWindow {
     hwaddr min_iova;
     hwaddr max_iova;
diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h
index f1de1ef120..849c8b34b2 100644
--- a/include/hw/vfio/vfio-container-base.h
+++ b/include/hw/vfio/vfio-container-base.h
@@ -50,8 +50,11 @@ typedef struct VFIOAddressSpace {
 typedef struct VFIOContainerBase {
     const VFIOIOMMUOps *ops;
     VFIOAddressSpace *space;
+    unsigned long pgsizes;
+    unsigned int dma_max_mappings;
     bool dirty_pages_supported;
     QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
+    QLIST_HEAD(, VFIORamDiscardListener) vrdl_list;
     QLIST_ENTRY(VFIOContainerBase) next;
     QLIST_HEAD(, VFIODevice) device_list;
 } VFIOContainerBase;
@@ -64,6 +67,16 @@ typedef struct VFIOGuestIOMMU {
     QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
 } VFIOGuestIOMMU;
 
+typedef struct VFIORamDiscardListener {
+    VFIOContainerBase *bcontainer;
+    MemoryRegion *mr;
+    hwaddr offset_within_address_space;
+    hwaddr size;
+    uint64_t granularity;
+    RamDiscardListener listener;
+    QLIST_ENTRY(VFIORamDiscardListener) next;
+} VFIORamDiscardListener;
+
 int vfio_container_dma_map(VFIOContainerBase *bcontainer,
                            hwaddr iova, ram_addr_t size,
                            void *vaddr, bool readonly);
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 91411d9844..9b34e7e0f8 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -351,13 +351,13 @@ static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
 {
     VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
                                                 listener);
+    VFIOContainerBase *bcontainer = vrdl->bcontainer;
     const hwaddr size = int128_get64(section->size);
     const hwaddr iova = section->offset_within_address_space;
     int ret;
 
     /* Unmap with a single call. */
-    ret = vfio_container_dma_unmap(&vrdl->container->bcontainer,
-                                   iova, size , NULL);
+    ret = vfio_container_dma_unmap(bcontainer, iova, size, NULL);
     if (ret) {
         error_report("%s: vfio_container_dma_unmap() failed: %s", __func__,
                      strerror(-ret));
@@ -369,6 +369,7 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
 {
     VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
                                                 listener);
+    VFIOContainerBase *bcontainer = vrdl->bcontainer;
     const hwaddr end = section->offset_within_region +
                        int128_get64(section->size);
     hwaddr start, next, iova;
@@ -387,8 +388,8 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
                section->offset_within_address_space;
         vaddr = memory_region_get_ram_ptr(section->mr) + start;
 
-        ret = vfio_container_dma_map(&vrdl->container->bcontainer, iova,
-                                     next - start, vaddr, section->readonly);
+        ret = vfio_container_dma_map(bcontainer, iova, next - start,
+                                     vaddr, section->readonly);
         if (ret) {
             /* Rollback */
             vfio_ram_discard_notify_discard(rdl, section);
@@ -398,7 +399,7 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
     return 0;
 }
 
-static void vfio_register_ram_discard_listener(VFIOContainer *container,
+static void vfio_register_ram_discard_listener(VFIOContainerBase *bcontainer,
                                                MemoryRegionSection *section)
 {
     RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
@@ -411,7 +412,7 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container,
     g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE));
 
     vrdl = g_new0(VFIORamDiscardListener, 1);
-    vrdl->container = container;
+    vrdl->bcontainer = bcontainer;
     vrdl->mr = section->mr;
     vrdl->offset_within_address_space = section->offset_within_address_space;
     vrdl->size = int128_get64(section->size);
@@ -419,14 +420,14 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container,
                                                                 section->mr);
 
     g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));
-    g_assert(container->pgsizes &&
-             vrdl->granularity >= 1ULL << ctz64(container->pgsizes));
+    g_assert(bcontainer->pgsizes &&
+             vrdl->granularity >= 1ULL << ctz64(bcontainer->pgsizes));
 
     ram_discard_listener_init(&vrdl->listener,
                               vfio_ram_discard_notify_populate,
                               vfio_ram_discard_notify_discard, true);
     ram_discard_manager_register_listener(rdm, &vrdl->listener, section);
-    QLIST_INSERT_HEAD(&container->vrdl_list, vrdl, next);
+    QLIST_INSERT_HEAD(&bcontainer->vrdl_list, vrdl, next);
 
     /*
      * Sanity-check if we have a theoretically problematic setup where we could
@@ -441,7 +442,7 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container,
      * number of sections in the address space we could have over time,
      * also consuming DMA mappings.
      */
-    if (container->dma_max_mappings) {
+    if (bcontainer->dma_max_mappings) {
         unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512;
 
 #ifdef CONFIG_KVM
@@ -450,7 +451,7 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container,
         }
 #endif
 
-        QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
+        QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
             hwaddr start, end;
 
             start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space,
@@ -462,23 +463,23 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container,
         }
 
         if (vrdl_mappings + max_memslots - vrdl_count >
-            container->dma_max_mappings) {
+            bcontainer->dma_max_mappings) {
             warn_report("%s: possibly running out of DMA mappings. E.g., try"
                         " increasing the 'block-size' of virtio-mem devies."
                         " Maximum possible DMA mappings: %d, Maximum possible"
-                        " memslots: %d", __func__, container->dma_max_mappings,
+                        " memslots: %d", __func__, bcontainer->dma_max_mappings,
                         max_memslots);
         }
     }
 }
 
-static void vfio_unregister_ram_discard_listener(VFIOContainer *container,
+static void vfio_unregister_ram_discard_listener(VFIOContainerBase *bcontainer,
                                                  MemoryRegionSection *section)
 {
     RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
     VFIORamDiscardListener *vrdl = NULL;
 
-    QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
+    QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
         if (vrdl->mr == section->mr &&
             vrdl->offset_within_address_space ==
             section->offset_within_address_space) {
@@ -627,7 +628,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
                             iommu_idx);
 
         ret = memory_region_iommu_set_page_size_mask(giommu->iommu_mr,
-                                                     container->pgsizes,
+                                                     bcontainer->pgsizes,
                                                      &err);
         if (ret) {
             g_free(giommu);
@@ -663,7 +664,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
      * about changes.
      */
     if (memory_region_has_ram_discard_manager(section->mr)) {
-        vfio_register_ram_discard_listener(container, section);
+        vfio_register_ram_discard_listener(bcontainer, section);
         return;
     }
 
@@ -782,7 +783,7 @@ static void vfio_listener_region_del(MemoryListener *listener,
         pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
         try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
     } else if (memory_region_has_ram_discard_manager(section->mr)) {
-        vfio_unregister_ram_discard_listener(container, section);
+        vfio_unregister_ram_discard_listener(bcontainer, section);
         /* Unregistering will trigger an unmap. */
         try_unmap = false;
     }
@@ -1261,17 +1262,17 @@ static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section,
      * Sync the whole mapped region (spanning multiple individual mappings)
      * in one go.
      */
-    return vfio_get_dirty_bitmap(&vrdl->container->bcontainer, iova, size,
-                                 ram_addr);
+    return vfio_get_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr);
 }
 
-static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container,
-                                                   MemoryRegionSection *section)
+static int
+vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer,
+                                            MemoryRegionSection *section)
 {
     RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
     VFIORamDiscardListener *vrdl = NULL;
 
-    QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
+    QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
         if (vrdl->mr == section->mr &&
             vrdl->offset_within_address_space ==
             section->offset_within_address_space) {
@@ -1325,7 +1326,7 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container,
         }
         return 0;
     } else if (memory_region_has_ram_discard_manager(section->mr)) {
-        return vfio_sync_ram_discard_listener_dirty_bitmap(container, section);
+        return vfio_sync_ram_discard_listener_dirty_bitmap(bcontainer, section);
     }
 
     ram_addr = memory_region_get_ram_addr(section->mr) +
diff --git a/hw/vfio/container-base.c b/hw/vfio/container-base.c
index a7cf517dd2..568f891841 100644
--- a/hw/vfio/container-base.c
+++ b/hw/vfio/container-base.c
@@ -76,15 +76,27 @@ void vfio_container_init(VFIOContainerBase *bcontainer, VFIOAddressSpace *space,
     bcontainer->ops = ops;
     bcontainer->space = space;
     bcontainer->dirty_pages_supported = false;
+    bcontainer->dma_max_mappings = 0;
     QLIST_INIT(&bcontainer->giommu_list);
+    QLIST_INIT(&bcontainer->vrdl_list);
 }
 
 void vfio_container_destroy(VFIOContainerBase *bcontainer)
 {
+    VFIORamDiscardListener *vrdl, *vrdl_tmp;
     VFIOGuestIOMMU *giommu, *tmp;
 
     QLIST_REMOVE(bcontainer, next);
 
+    QLIST_FOREACH_SAFE(vrdl, &bcontainer->vrdl_list, next, vrdl_tmp) {
+        RamDiscardManager *rdm;
+
+        rdm = memory_region_get_ram_discard_manager(vrdl->mr);
+        ram_discard_manager_unregister_listener(rdm, &vrdl->listener);
+        QLIST_REMOVE(vrdl, next);
+        g_free(vrdl);
+    }
+
     QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) {
         memory_region_unregister_iommu_notifier(
                 MEMORY_REGION(giommu->iommu_mr), &giommu->n);
diff --git a/hw/vfio/container.c b/hw/vfio/container.c
index 8d5b408e86..0e265ffa67 100644
--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -154,7 +154,7 @@ static int vfio_legacy_dma_unmap(VFIOContainerBase *bcontainer, hwaddr iova,
         if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
             container->iommu_type == VFIO_TYPE1v2_IOMMU) {
             trace_vfio_legacy_dma_unmap_overflow_workaround();
-            unmap.size -= 1ULL << ctz64(container->pgsizes);
+            unmap.size -= 1ULL << ctz64(container->bcontainer.pgsizes);
             continue;
         }
         error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
@@ -559,9 +559,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
     container = g_malloc0(sizeof(*container));
     container->fd = fd;
     container->error = NULL;
-    container->dma_max_mappings = 0;
     container->iova_ranges = NULL;
-    QLIST_INIT(&container->vrdl_list);
     bcontainer = &container->bcontainer;
     vfio_container_init(bcontainer, space, &vfio_legacy_ops);
 
@@ -589,13 +587,13 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
         }
 
         if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
-            container->pgsizes = info->iova_pgsizes;
+            container->bcontainer.pgsizes = info->iova_pgsizes;
         } else {
-            container->pgsizes = qemu_real_host_page_size();
+            container->bcontainer.pgsizes = qemu_real_host_page_size();
         }
 
-        if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) {
-            container->dma_max_mappings = 65535;
+        if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) {
+            bcontainer->dma_max_mappings = 65535;
         }
 
         vfio_get_info_iova_range(info, container);
diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c
index 3495737ab2..dbc4c24052 100644
--- a/hw/vfio/spapr.c
+++ b/hw/vfio/spapr.c
@@ -223,13 +223,13 @@ static int vfio_spapr_create_window(VFIOContainer *container,
     if (pagesize > rampagesize) {
         pagesize = rampagesize;
     }
-    pgmask = container->pgsizes & (pagesize | (pagesize - 1));
+    pgmask = container->bcontainer.pgsizes & (pagesize | (pagesize - 1));
     pagesize = pgmask ? (1ULL << (63 - clz64(pgmask))) : 0;
     if (!pagesize) {
         error_report("Host doesn't support page size 0x%"PRIx64
                      ", the supported mask is 0x%lx",
                      memory_region_iommu_get_min_page_size(iommu_mr),
-                     container->pgsizes);
+                     container->bcontainer.pgsizes);
         return -EINVAL;
     }
 
@@ -385,7 +385,7 @@ void vfio_container_del_section_window(VFIOContainer *container,
 
 bool vfio_spapr_container_init(VFIOContainer *container, Error **errp)
 {
-
+    VFIOContainerBase *bcontainer = &container->bcontainer;
     struct vfio_iommu_spapr_tce_info info;
     bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;
     int ret, fd = container->fd;
@@ -424,7 +424,7 @@ bool vfio_spapr_container_init(VFIOContainer *container, Error **errp)
     }
 
     if (v2) {
-        container->pgsizes = info.ddw.pgsizes;
+        bcontainer->pgsizes = info.ddw.pgsizes;
         /*
          * There is a default window in just created container.
          * To make region_add/del simpler, we better remove this
@@ -439,8 +439,8 @@ bool vfio_spapr_container_init(VFIOContainer *container, Error **errp)
         }
         }
     } else {
         /* The default table uses 4K pages */
-        container->pgsizes = 0x1000;
+        bcontainer->pgsizes = 0x1000;
         vfio_host_win_add(container, info.dma32_window_start,
                           info.dma32_window_start +
                           info.dma32_window_size - 1,
@@ -455,7 +455,15 @@ listener_unregister_exit:
 
 void vfio_spapr_container_deinit(VFIOContainer *container)
 {
+    VFIOHostDMAWindow *hostwin, *next;
+
     if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
         memory_listener_unregister(&container->prereg_listener);
     }
+    QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next,
+                       next) {
+        QLIST_REMOVE(hostwin, hostwin_next);
+        g_free(hostwin);
+    }
+
 }
-- 
2.34.1



Thread overview: 95+ messages
2023-10-26 10:30 [PATCH v3 00/37] vfio: Adopt iommufd Zhenzhong Duan
2023-10-26 10:30 ` [PATCH v3 01/37] vfio/container: Move IBM EEH related functions into spapr_pci_vfio.c Zhenzhong Duan
2023-10-26 14:23   ` Eric Farman
2023-10-27  9:19   ` Cédric Le Goater
2023-10-26 10:30 ` [PATCH v3 02/37] vfio/container: Move vfio_container_add/del_section_window into spapr.c Zhenzhong Duan
2023-10-27  9:19   ` Cédric Le Goater
2023-10-26 10:30 ` [PATCH v3 03/37] vfio/container: Move spapr specific init/deinit " Zhenzhong Duan
2023-10-27  9:27   ` Cédric Le Goater
2023-10-26 10:30 ` [PATCH v3 04/37] vfio/spapr: Make vfio_spapr_create/remove_window static Zhenzhong Duan
2023-10-27  9:27   ` Cédric Le Goater
2023-10-26 10:30 ` [PATCH v3 05/37] vfio/common: Move vfio_host_win_add/del into spapr.c Zhenzhong Duan
2023-10-27  9:30   ` Cédric Le Goater
2023-10-26 10:30 ` [PATCH v3 06/37] vfio: Introduce base object for VFIOContainer and targetted interface Zhenzhong Duan
2023-10-27 14:02   ` Cédric Le Goater
2023-10-30  2:40     ` Duan, Zhenzhong
2023-10-31  7:57       ` Cédric Le Goater
2023-10-31  8:31         ` Duan, Zhenzhong
2023-10-26 10:30 ` [PATCH v3 07/37] vfio/container: Introduce a empty VFIOIOMMUOps Zhenzhong Duan
2023-10-27 14:20   ` Cédric Le Goater
2023-10-30  2:43     ` Duan, Zhenzhong
2023-10-31  8:21       ` Cédric Le Goater
2023-10-26 10:30 ` [PATCH v3 08/37] vfio/container: Switch to dma_map|unmap API Zhenzhong Duan
2023-10-27 14:26   ` Cédric Le Goater
2023-10-30  2:48     ` Duan, Zhenzhong
2023-10-26 10:30 ` [PATCH v3 09/37] vfio/common: Move giommu_list in base container Zhenzhong Duan
2023-10-27 14:38   ` Cédric Le Goater
2023-10-30  2:48     ` Duan, Zhenzhong
2023-10-26 10:30 ` [PATCH v3 10/37] vfio/container: Move space field to " Zhenzhong Duan
2023-10-27 14:46   ` Cédric Le Goater
2023-10-30  2:51     ` Duan, Zhenzhong
2023-10-31  8:24       ` Cédric Le Goater
2023-10-26 10:30 ` [PATCH v3 11/37] vfio/container: Switch to IOMMU BE set_dirty_page_tracking/query_dirty_bitmap API Zhenzhong Duan
2023-10-27 14:53   ` Cédric Le Goater
2023-10-30  2:53     ` Duan, Zhenzhong
2023-10-26 10:30 ` [PATCH v3 12/37] vfio/container: Move per container device list in base container Zhenzhong Duan
2023-10-27 15:03   ` Cédric Le Goater
2023-10-26 10:30 ` [PATCH v3 13/37] vfio/container: Convert functions to " Zhenzhong Duan
2023-10-27 15:03   ` Cédric Le Goater
2023-10-26 10:30 ` Zhenzhong Duan [this message]
2023-10-27 15:52   ` [PATCH v3 14/37] vfio/container: Move vrdl_list, pgsizes and dma_max_mappings " Cédric Le Goater
2023-10-30  3:14     ` Duan, Zhenzhong
2023-10-31  8:26       ` Cédric Le Goater
2023-10-26 10:30 ` [PATCH v3 15/37] vfio/container: Move listener " Zhenzhong Duan
2023-10-26 10:30 ` [PATCH v3 16/37] vfio/container: Move dirty_pgsizes and max_dirty_bitmap_size " Zhenzhong Duan
2023-10-27 16:01   ` Cédric Le Goater
2023-10-26 10:30 ` [PATCH v3 17/37] vfio/container: Move iova_ranges " Zhenzhong Duan
2023-10-26 10:30 ` [PATCH v3 18/37] vfio/container: Implement attach/detach_device Zhenzhong Duan
2023-10-27 16:04   ` Cédric Le Goater
2023-10-27 16:06   ` Cédric Le Goater
2023-10-30  3:20     ` Duan, Zhenzhong
2023-10-26 10:30 ` [PATCH v3 19/37] vfio/spapr: Introduce spapr backend and target interface Zhenzhong Duan
2023-10-27 16:04   ` Cédric Le Goater
2023-10-30  3:15     ` Duan, Zhenzhong
2023-10-26 10:30 ` [PATCH v3 20/37] vfio/spapr: switch to spapr IOMMU BE add/del_section_window Zhenzhong Duan
2023-10-26 10:30 ` [PATCH v3 21/37] vfio/spapr: Move prereg_listener into spapr container Zhenzhong Duan
2023-10-26 10:30 ` [PATCH v3 22/37] vfio/spapr: Move hostwin_list " Zhenzhong Duan
2023-10-26 10:30 ` [PATCH v3 23/37] Add iommufd configure option Zhenzhong Duan
2023-10-31  8:58   ` Cédric Le Goater
2023-10-31 10:52     ` Duan, Zhenzhong
2023-10-31 11:25       ` Cédric Le Goater
2023-10-26 10:30 ` [PATCH v3 24/37] backends/iommufd: Introduce the iommufd object Zhenzhong Duan
2023-10-26 13:27   ` Markus Armbruster
2023-10-27  7:50     ` Duan, Zhenzhong
2023-10-27  8:30       ` Markus Armbruster
2023-10-27  9:41         ` Duan, Zhenzhong
2023-10-26 10:30 ` [PATCH v3 25/37] util/char_dev: Add open_cdev() Zhenzhong Duan
2023-10-30 14:53   ` Cédric Le Goater
2023-10-31  1:59     ` Duan, Zhenzhong
2023-10-26 10:30 ` [PATCH v3 26/37] vfio/iommufd: Implement the iommufd backend Zhenzhong Duan
2023-10-26 10:30 ` [PATCH v3 27/37] vfio/iommufd: Switch to manual hwpt allocation Zhenzhong Duan
2023-10-30 13:52   ` Cédric Le Goater
2023-10-31  2:02     ` Duan, Zhenzhong
2023-10-26 10:30 ` [PATCH v3 28/37] vfio/iommufd: Add support for iova_ranges Zhenzhong Duan
2023-10-26 10:30 ` [PATCH v3 29/37] vfio/iommufd: Bypass EEH if iommufd backend Zhenzhong Duan
2023-10-30 13:56   ` Cédric Le Goater
2023-10-31  2:26     ` Duan, Zhenzhong
2023-10-31  9:01       ` Cédric Le Goater
2023-10-31  9:06         ` Duan, Zhenzhong
2023-10-26 10:30 ` [PATCH v3 30/37] vfio/pci: Extract out a helper vfio_pci_get_pci_hot_reset_info Zhenzhong Duan
2023-10-30 13:59   ` Cédric Le Goater
2023-10-31  2:30     ` Duan, Zhenzhong
2023-10-26 10:30 ` [PATCH v3 31/37] vfio/pci: Adapt vfio pci hot reset support with iommufd BE Zhenzhong Duan
2023-10-30 14:04   ` Cédric Le Goater
2023-10-31  2:27     ` Duan, Zhenzhong
2023-10-26 10:30 ` [PATCH v3 32/37] vfio/pci: Allow the selection of a given iommu backend Zhenzhong Duan
2023-10-26 10:31 ` [PATCH v3 33/37] vfio/pci: Make vfio cdev pre-openable by passing a file handle Zhenzhong Duan
2023-10-26 10:31 ` [PATCH v3 34/37] vfio: Allow the selection of a given iommu backend for platform ap and ccw Zhenzhong Duan
2023-10-26 10:31 ` [PATCH v3 35/37] vfio/platform: Make vfio cdev pre-openable by passing a file handle Zhenzhong Duan
2023-10-26 10:31 ` [PATCH v3 36/37] vfio/ap: " Zhenzhong Duan
2023-10-26 10:31 ` [PATCH v3 37/37] vfio/ccw: " Zhenzhong Duan
2023-10-26 13:27 ` [PATCH v3 00/37] vfio: Adopt iommufd Markus Armbruster
2023-10-27  6:17   ` Duan, Zhenzhong
2023-10-27  7:45     ` Cédric Le Goater
2023-10-27  8:16       ` Duan, Zhenzhong
2023-10-27 13:43         ` Cédric Le Goater
