From: "Cédric Le Goater" <clg@redhat.com>
To: Zhenzhong Duan <zhenzhong.duan@intel.com>, qemu-devel@nongnu.org
Cc: alex.williamson@redhat.com, jgg@nvidia.com, nicolinc@nvidia.com,
joao.m.martins@oracle.com, eric.auger@redhat.com,
peterx@redhat.com, jasowang@redhat.com, kevin.tian@intel.com,
yi.l.liu@intel.com, yi.y.sun@intel.com, chao.p.peng@intel.com,
Nicholas Piggin <npiggin@gmail.com>,
Daniel Henrique Barboza <danielhb413@gmail.com>,
David Gibson <david@gibson.dropbear.id.au>,
Harsh Prateek Bora <harshpb@linux.ibm.com>,
"open list:sPAPR (pseries)" <qemu-ppc@nongnu.org>
Subject: Re: [PATCH v4 05/41] vfio/common: Move vfio_host_win_add/del into spapr.c
Date: Mon, 6 Nov 2023 10:33:21 +0100
Message-ID: <cbd33911-f04a-4323-9a44-c2a43e1df069@redhat.com>
In-Reply-To: <20231102071302.1818071-6-zhenzhong.duan@intel.com>

On 11/2/23 08:12, Zhenzhong Duan wrote:
> Only spapr supports a custom host window list; other vfio drivers
> assume a 64-bit host window. So remove the check from the listener
> callbacks, move vfio_host_win_add/del into spapr.c and make them
> static.
>
> With the check removed, we still need to perform the same check for
> VFIO_SPAPR_TCE_IOMMU, which allows a single host window range
> [dma32_window_start, dma32_window_size). Move vfio_find_hostwin
> into spapr.c and do the same check in
> vfio_container_add_section_window instead.
>
> When mapping a ram device section, the mapping is bypassed if it is
> not aligned with hostwin->iova_pgsizes. With hostwin moved into
> spapr, the check now uses container->pgsizes instead.
>
> Suggested-by: Alex Williamson <alex.williamson@redhat.com>
> Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Thanks,
C.
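
As an aside, for readers less familiar with the sPAPR host window
concept, here is a minimal standalone sketch (not code from the patch or
from QEMU; the types and window values are made up for illustration) of
the containment check that vfio_find_hostwin() performs before
vfio_container_add_section_window() accepts a section:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t min_iova;
        uint64_t max_iova;   /* inclusive upper bound */
    } HostWindow;

    /* A section [iova, end] is only mappable if one window fully covers it. */
    static bool window_covers(const HostWindow *win, uint64_t iova, uint64_t end)
    {
        return win->min_iova <= iova && end <= win->max_iova;
    }

    int main(void)
    {
        /* hypothetical 2 GiB 32-bit DMA window starting at IOVA 0 */
        HostWindow win = { .min_iova = 0, .max_iova = 0x7fffffffULL };

        printf("%d\n", window_covers(&win, 0x100000, 0x1fffff));     /* 1: inside */
        printf("%d\n", window_covers(&win, 0x7ff00000, 0x80000000)); /* 0: crosses the end */
        return 0;
    }

A section that is not fully contained in any registered window is
rejected, which is why the single-window VFIO_SPAPR_TCE_IOMMU case keeps
this check in vfio_container_add_section_window().
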
> ---
> v4: add vfio_find_hostwin back for VFIO_SPAPR_TCE_IOMMU
>
> include/hw/vfio/vfio-common.h | 5 ---
> hw/vfio/common.c | 70 +----------------------------
> hw/vfio/container.c | 16 -------
> hw/vfio/spapr.c | 83 +++++++++++++++++++++++++++++++++++
> 4 files changed, 85 insertions(+), 89 deletions(-)
>
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index 87848982bd..a4a22accb9 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -207,11 +207,6 @@ typedef struct {
> hwaddr pages;
> } VFIOBitmap;
>
> -void vfio_host_win_add(VFIOContainer *container,
> - hwaddr min_iova, hwaddr max_iova,
> - uint64_t iova_pgsizes);
> -int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
> - hwaddr max_iova);
> VFIOAddressSpace *vfio_get_address_space(AddressSpace *as);
> void vfio_put_address_space(VFIOAddressSpace *space);
> bool vfio_devices_all_running_and_saving(VFIOContainer *container);
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index e72055e752..e70fdf5e0c 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -245,44 +245,6 @@ bool vfio_devices_all_running_and_mig_active(VFIOContainer *container)
> return true;
> }
>
> -void vfio_host_win_add(VFIOContainer *container, hwaddr min_iova,
> - hwaddr max_iova, uint64_t iova_pgsizes)
> -{
> - VFIOHostDMAWindow *hostwin;
> -
> - QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
> - if (ranges_overlap(hostwin->min_iova,
> - hostwin->max_iova - hostwin->min_iova + 1,
> - min_iova,
> - max_iova - min_iova + 1)) {
> - hw_error("%s: Overlapped IOMMU are not enabled", __func__);
> - }
> - }
> -
> - hostwin = g_malloc0(sizeof(*hostwin));
> -
> - hostwin->min_iova = min_iova;
> - hostwin->max_iova = max_iova;
> - hostwin->iova_pgsizes = iova_pgsizes;
> - QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
> -}
> -
> -int vfio_host_win_del(VFIOContainer *container,
> - hwaddr min_iova, hwaddr max_iova)
> -{
> - VFIOHostDMAWindow *hostwin;
> -
> - QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
> - if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
> - QLIST_REMOVE(hostwin, hostwin_next);
> - g_free(hostwin);
> - return 0;
> - }
> - }
> -
> - return -1;
> -}
> -
> static bool vfio_listener_skipped_section(MemoryRegionSection *section)
> {
> return (!memory_region_is_ram(section->mr) &&
> @@ -531,22 +493,6 @@ static void vfio_unregister_ram_discard_listener(VFIOContainer *container,
> g_free(vrdl);
> }
>
> -static VFIOHostDMAWindow *vfio_find_hostwin(VFIOContainer *container,
> - hwaddr iova, hwaddr end)
> -{
> - VFIOHostDMAWindow *hostwin;
> - bool hostwin_found = false;
> -
> - QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
> - if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
> - hostwin_found = true;
> - break;
> - }
> - }
> -
> - return hostwin_found ? hostwin : NULL;
> -}
> -
> static bool vfio_known_safe_misalignment(MemoryRegionSection *section)
> {
> MemoryRegion *mr = section->mr;
> @@ -625,7 +571,6 @@ static void vfio_listener_region_add(MemoryListener *listener,
> Int128 llend, llsize;
> void *vaddr;
> int ret;
> - VFIOHostDMAWindow *hostwin;
> Error *err = NULL;
>
> if (!vfio_listener_valid_section(section, "region_add")) {
> @@ -647,13 +592,6 @@ static void vfio_listener_region_add(MemoryListener *listener,
> goto fail;
> }
>
> - hostwin = vfio_find_hostwin(container, iova, end);
> - if (!hostwin) {
> - error_setg(&err, "Container %p can't map guest IOVA region"
> - " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end);
> - goto fail;
> - }
> -
> memory_region_ref(section->mr);
>
> if (memory_region_is_iommu(section->mr)) {
> @@ -734,7 +672,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
> llsize = int128_sub(llend, int128_make64(iova));
>
> if (memory_region_is_ram_device(section->mr)) {
> - hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
> + hwaddr pgmask = (1ULL << ctz64(container->pgsizes)) - 1;
>
> if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
> trace_vfio_listener_region_add_no_dma_map(
> @@ -833,12 +771,8 @@ static void vfio_listener_region_del(MemoryListener *listener,
>
> if (memory_region_is_ram_device(section->mr)) {
> hwaddr pgmask;
> - VFIOHostDMAWindow *hostwin;
> -
> - hostwin = vfio_find_hostwin(container, iova, end);
> - assert(hostwin); /* or region_add() would have failed */
>
> - pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
> + pgmask = (1ULL << ctz64(container->pgsizes)) - 1;
> try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
> } else if (memory_region_has_ram_discard_manager(section->mr)) {
> vfio_unregister_ram_discard_listener(container, section);
> diff --git a/hw/vfio/container.c b/hw/vfio/container.c
> index 204b244b11..242010036a 100644
> --- a/hw/vfio/container.c
> +++ b/hw/vfio/container.c
> @@ -551,7 +551,6 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
> container->dma_max_mappings = 0;
> container->iova_ranges = NULL;
> QLIST_INIT(&container->giommu_list);
> - QLIST_INIT(&container->hostwin_list);
> QLIST_INIT(&container->vrdl_list);
>
> ret = vfio_init_container(container, group->fd, errp);
> @@ -591,14 +590,6 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
>
> vfio_get_iommu_info_migration(container, info);
> g_free(info);
> -
> - /*
> - * FIXME: We should parse VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE
> - * information to get the actual window extent rather than assume
> - * a 64-bit IOVA address space.
> - */
> - vfio_host_win_add(container, 0, (hwaddr)-1, container->pgsizes);
> -
> break;
> }
> case VFIO_SPAPR_TCE_v2_IOMMU:
> @@ -687,7 +678,6 @@ static void vfio_disconnect_container(VFIOGroup *group)
> if (QLIST_EMPTY(&container->group_list)) {
> VFIOAddressSpace *space = container->space;
> VFIOGuestIOMMU *giommu, *tmp;
> - VFIOHostDMAWindow *hostwin, *next;
>
> QLIST_REMOVE(container, next);
>
> @@ -698,12 +688,6 @@ static void vfio_disconnect_container(VFIOGroup *group)
> g_free(giommu);
> }
>
> - QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next,
> - next) {
> - QLIST_REMOVE(hostwin, hostwin_next);
> - g_free(hostwin);
> - }
> -
> trace_vfio_disconnect_container(container->fd);
> close(container->fd);
> vfio_free_container(container);
> diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c
> index 4428990c28..83da2f7ec2 100644
> --- a/hw/vfio/spapr.c
> +++ b/hw/vfio/spapr.c
> @@ -146,6 +146,60 @@ static const MemoryListener vfio_prereg_listener = {
> .region_del = vfio_prereg_listener_region_del,
> };
>
> +static void vfio_host_win_add(VFIOContainer *container, hwaddr min_iova,
> + hwaddr max_iova, uint64_t iova_pgsizes)
> +{
> + VFIOHostDMAWindow *hostwin;
> +
> + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
> + if (ranges_overlap(hostwin->min_iova,
> + hostwin->max_iova - hostwin->min_iova + 1,
> + min_iova,
> + max_iova - min_iova + 1)) {
> + hw_error("%s: Overlapped IOMMU are not enabled", __func__);
> + }
> + }
> +
> + hostwin = g_malloc0(sizeof(*hostwin));
> +
> + hostwin->min_iova = min_iova;
> + hostwin->max_iova = max_iova;
> + hostwin->iova_pgsizes = iova_pgsizes;
> + QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
> +}
> +
> +static int vfio_host_win_del(VFIOContainer *container,
> + hwaddr min_iova, hwaddr max_iova)
> +{
> + VFIOHostDMAWindow *hostwin;
> +
> + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
> + if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
> + QLIST_REMOVE(hostwin, hostwin_next);
> + g_free(hostwin);
> + return 0;
> + }
> + }
> +
> + return -1;
> +}
> +
> +static VFIOHostDMAWindow *vfio_find_hostwin(VFIOContainer *container,
> + hwaddr iova, hwaddr end)
> +{
> + VFIOHostDMAWindow *hostwin;
> + bool hostwin_found = false;
> +
> + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
> + if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
> + hostwin_found = true;
> + break;
> + }
> + }
> +
> + return hostwin_found ? hostwin : NULL;
> +}
> +
> static int vfio_spapr_remove_window(VFIOContainer *container,
> hwaddr offset_within_address_space)
> {
> @@ -267,6 +321,26 @@ int vfio_container_add_section_window(VFIOContainer *container,
> hwaddr pgsize = 0;
> int ret;
>
> + /*
> + * VFIO_SPAPR_TCE_IOMMU supports a single host window between
> + * [dma32_window_start, dma32_window_size), we need to ensure
> + * the section fall in this range.
> + */
> + if (container->iommu_type == VFIO_SPAPR_TCE_IOMMU) {
> + hwaddr iova, end;
> +
> + iova = section->offset_within_address_space;
> + end = iova + int128_get64(section->size) - 1;
> +
> + if (!vfio_find_hostwin(container, iova, end)) {
> + error_setg(errp, "Container %p can't map guest IOVA region"
> + " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container,
> + iova, end);
> + return -EINVAL;
> + }
> + return 0;
> + }
> +
> if (container->iommu_type != VFIO_SPAPR_TCE_v2_IOMMU) {
> return 0;
> }
> @@ -351,6 +425,8 @@ int vfio_spapr_container_init(VFIOContainer *container, Error **errp)
> bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;
> int ret, fd = container->fd;
>
> + QLIST_INIT(&container->hostwin_list);
> +
> /*
> * The host kernel code implementing VFIO_IOMMU_DISABLE is called
> * when container fd is closed so we do not call it explicitly
> @@ -418,7 +494,14 @@ listener_unregister_exit:
>
> void vfio_spapr_container_deinit(VFIOContainer *container)
> {
> + VFIOHostDMAWindow *hostwin, *next;
> +
> if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
> memory_listener_unregister(&container->prereg_listener);
> }
> + QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next,
> + next) {
> + QLIST_REMOVE(hostwin, hostwin_next);
> + g_free(hostwin);
> + }
> }
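
For reference, a minimal standalone sketch (not code from the patch or
from QEMU; the page-size bitmap is hypothetical and __builtin_ctzll is
assumed to be available as a GCC/Clang builtin) of the page-size mask
test that the ram device path now derives from container->pgsizes
instead of hostwin->iova_pgsizes:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Count trailing zeros; pgsizes is a bitmap of supported page sizes. */
    static unsigned ctz64(uint64_t v)
    {
        return __builtin_ctzll(v);  /* assumes at least one bit is set */
    }

    static bool aligned_for_dma(uint64_t pgsizes, uint64_t iova, uint64_t size)
    {
        /* mask of the smallest supported page size, as in the listener code */
        uint64_t pgmask = (1ULL << ctz64(pgsizes)) - 1;

        return !((iova & pgmask) || (size & pgmask));
    }

    int main(void)
    {
        /* hypothetical bitmap: 4 KiB and 2 MiB pages supported */
        uint64_t pgsizes = (1ULL << 12) | (1ULL << 21);

        printf("%d\n", aligned_for_dma(pgsizes, 0x1000, 0x2000)); /* 1: 4 KiB aligned */
        printf("%d\n", aligned_for_dma(pgsizes, 0x800,  0x2000)); /* 0: misaligned iova */
        return 0;
    }

Running it prints 1 then 0: a ram device section whose IOVA or size is
not aligned to the smallest supported page size is skipped rather than
mapped, which matches the behaviour of the region_add/region_del paths
above.
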