From: Alex Williamson <alex.williamson@redhat.com>
To: John Johnson <john.g.johnson@oracle.com>
Cc: qemu-devel@nongnu.org, clg@redhat.com, philmd@linaro.org
Subject: Re: [PATCH v2 18/23] vfio-user: add dma_unmap_all
Date: Mon, 6 Feb 2023 14:29:04 -0700
Message-ID: <20230206142904.073989dd.alex.williamson@redhat.com>
In-Reply-To: <20fc8b4bb94583ef41d289db3831a9d07a0eae02.1675228037.git.john.g.johnson@oracle.com>

On Wed, 1 Feb 2023 21:55:54 -0800
John Johnson <john.g.johnson@oracle.com> wrote:
> Signed-off-by: John G Johnson <john.g.johnson@oracle.com>
> Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
> Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
> ---
> include/hw/vfio/vfio-common.h | 1 +
> hw/vfio/common.c | 45 ++++++++++++++++++++++++++++++++++---------
> hw/vfio/user.c | 24 +++++++++++++++++++++++
> 3 files changed, 61 insertions(+), 9 deletions(-)
This is inventing a need for a kernel vs. user callback op rather than
probing for VFIO_UNMAP_ALL support and using VFIO_DMA_UNMAP_FLAG_ALL
consistently (see the sketch below). The flags arg of the
dma_unmap_all() op is unused, and the kernel implementation introduces
hard-coded magic values. The vfio-user dirty_bitmap addition here is
rather random. Please include some sort of description in the commit
log for all patches.
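
Something along these lines is what I have in mind. Completely
untested sketch; the container->unmap_all field is made up here,
standing in for wherever unmap-all support ends up being probed and
cached at container setup time:

static int vfio_dma_unmap_all(VFIOContainer *container)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = VFIO_DMA_UNMAP_FLAG_ALL,
        .iova = 0,
        .size = 0,
    };
    int ret;

    if (container->unmap_all) {
        /* One call covering the entire address space. */
        return container->io->dma_unmap(container, &unmap, NULL);
    }

    /*
     * Fall back to unmapping the 64-bit span in two halves, since
     * the unmap ioctl doesn't accept a full 64-bit size.
     */
    unmap.flags = 0;
    unmap.size = 1ULL << 63;
    ret = container->io->dma_unmap(container, &unmap, NULL);
    if (ret) {
        return ret;
    }
    unmap.iova += unmap.size;
    return container->io->dma_unmap(container, &unmap, NULL);
}

That keeps a single dma_unmap op for both backends and drops the
hard-coded size constants.

Thanks,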
Alex
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index ee6ad8f..abef9b4 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -193,6 +193,7 @@ struct VFIOContainerIO {
> int (*dma_unmap)(VFIOContainer *container,
> struct vfio_iommu_type1_dma_unmap *unmap,
> struct vfio_bitmap *bitmap);
> + int (*dma_unmap_all)(VFIOContainer *container, uint32_t flags);
> int (*dirty_bitmap)(VFIOContainer *container,
> struct vfio_iommu_type1_dirty_bitmap *bitmap,
> struct vfio_iommu_type1_dirty_bitmap_get *range);
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index f04fd20..8b55fbb 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -508,6 +508,14 @@ static int vfio_dma_unmap(VFIOContainer *container,
> return container->io->dma_unmap(container, &unmap, NULL);
> }
>
> +/*
> + * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
> + */
> +static int vfio_dma_unmap_all(VFIOContainer *container)
> +{
> + return container->io->dma_unmap_all(container, VFIO_DMA_UNMAP_FLAG_ALL);
> +}
> +
> static int vfio_dma_map(VFIOContainer *container, MemoryRegion *mr, hwaddr iova,
> ram_addr_t size, void *vaddr, bool readonly)
> {
> @@ -1256,17 +1264,10 @@ static void vfio_listener_region_del(MemoryListener *listener,
>
> if (try_unmap) {
> if (int128_eq(llsize, int128_2_64())) {
> - /* The unmap ioctl doesn't accept a full 64-bit span. */
> - llsize = int128_rshift(llsize, 1);
> + ret = vfio_dma_unmap_all(container);
> + } else {
> ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
> - if (ret) {
> - error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
> - "0x%"HWADDR_PRIx") = %d (%m)",
> - container, iova, int128_get64(llsize), ret);
> - }
> - iova += int128_get64(llsize);
> }
> - ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
> if (ret) {
> error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
> "0x%"HWADDR_PRIx") = %d (%m)",
> @@ -2867,6 +2868,31 @@ static int vfio_io_dma_unmap(VFIOContainer *container,
> return 0;
> }
>
> +static int vfio_io_dma_unmap_all(VFIOContainer *container, uint32_t flags)
> +{
> + struct vfio_iommu_type1_dma_unmap unmap = {
> + .argsz = sizeof(unmap),
> + .flags = 0,
> + .size = 0x8000000000000000,
> + };
> + int ret;
> +
> + /* The unmap ioctl doesn't accept a full 64-bit span. */
> + unmap.iova = 0;
> + ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
> + if (ret) {
> + return -errno;
> + }
> +
> + unmap.iova += unmap.size;
> + ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
> + if (ret) {
> + return -errno;
> + }
> +
> + return 0;
> +}
> +
> static int vfio_io_dirty_bitmap(VFIOContainer *container,
> struct vfio_iommu_type1_dirty_bitmap *bitmap,
> struct vfio_iommu_type1_dirty_bitmap_get *range)
> @@ -2886,6 +2912,7 @@ static void vfio_io_wait_commit(VFIOContainer *container)
> static VFIOContainerIO vfio_cont_io_ioctl = {
> .dma_map = vfio_io_dma_map,
> .dma_unmap = vfio_io_dma_unmap,
> + .dma_unmap_all = vfio_io_dma_unmap_all,
> .dirty_bitmap = vfio_io_dirty_bitmap,
> .wait_commit = vfio_io_wait_commit,
> };
> diff --git a/hw/vfio/user.c b/hw/vfio/user.c
> index 6dee775..fe6e476 100644
> --- a/hw/vfio/user.c
> +++ b/hw/vfio/user.c
> @@ -1825,6 +1825,28 @@ static int vfio_user_io_dma_unmap(VFIOContainer *container,
> container->async_ops);
> }
>
> +static int vfio_user_io_dma_unmap_all(VFIOContainer *container, uint32_t flags)
> +{
> + struct vfio_iommu_type1_dma_unmap unmap = {
> + .argsz = sizeof(unmap),
> + .flags = flags | VFIO_DMA_UNMAP_FLAG_ALL,
> + .iova = 0,
> + .size = 0,
> + };
> +
> + return vfio_user_dma_unmap(container->proxy, &unmap, NULL,
> + container->async_ops);
> +}
> +
> +static int vfio_user_io_dirty_bitmap(VFIOContainer *container,
> + struct vfio_iommu_type1_dirty_bitmap *bitmap,
> + struct vfio_iommu_type1_dirty_bitmap_get *range)
> +{
> +
> + /* vfio-user doesn't support migration */
> + return -EINVAL;
> +}
> +
> static void vfio_user_io_wait_commit(VFIOContainer *container)
> {
> vfio_user_wait_reqs(container->proxy);
> @@ -1833,5 +1855,7 @@ static void vfio_user_io_wait_commit(VFIOContainer *container)
> VFIOContainerIO vfio_cont_io_sock = {
> .dma_map = vfio_user_io_dma_map,
> .dma_unmap = vfio_user_io_dma_unmap,
> + .dma_unmap_all = vfio_user_io_dma_unmap_all,
> + .dirty_bitmap = vfio_user_io_dirty_bitmap,
> .wait_commit = vfio_user_io_wait_commit,
> };