From: John Levon <john.levon@nutanix.com>
To: qemu-devel@nongnu.org
Cc: "Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Halil Pasic" <pasic@linux.ibm.com>,
"Tomita Moeko" <tomitamoeko@gmail.com>,
"Matthew Rosato" <mjrosato@linux.ibm.com>,
"Stefano Garzarella" <sgarzare@redhat.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Peter Xu" <peterx@redhat.com>,
"Cédric Le Goater" <clg@redhat.com>,
"Thomas Huth" <thuth@redhat.com>,
"Tony Krowiak" <akrowiak@linux.ibm.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Eric Farman" <farman@linux.ibm.com>,
"David Hildenbrand" <david@redhat.com>,
qemu-s390x@nongnu.org, "Jason Herne" <jjherne@linux.ibm.com>,
"John Levon" <john.levon@nutanix.com>
Subject: [PATCH v3 09/15] vfio: implement unmap all for DMA unmap callbacks
Date: Wed, 7 May 2025 16:20:14 +0100
Message-ID: <20250507152020.1254632-10-john.levon@nutanix.com>
In-Reply-To: <20250507152020.1254632-1-john.levon@nutanix.com>

Handle the unmap_all flag in the DMA unmap handlers themselves rather than
in the caller. The kernel unmap interfaces cannot express a full 64-bit
span, so the legacy (type1) and iommufd backends each unmap the IOVA space
in two halves; vfio_listener_region_del() now simply passes unmap_all=true
when removing a section that covers the entire 64-bit space.
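
For illustration only, a minimal sketch of the split-in-halves approach
shared by both backends (the helper name unmap_all_in_halves, the
backend_unmap callback and the opaque argument are hypothetical stand-ins,
not part of this patch):

    #include <stdint.h>

    /*
     * The unmap ioctls cannot express a 2^64-byte span, so cover the
     * whole IOVA space with two unmaps of 2^63 bytes each:
     * [0, 2^63) followed by [2^63, 2^64).
     */
    static int unmap_all_in_halves(void *opaque,
                                   int (*backend_unmap)(void *opaque,
                                                        uint64_t iova,
                                                        uint64_t size))
    {
        const uint64_t half = UINT64_C(1) << 63; /* half the IOVA space */
        int ret;

        ret = backend_unmap(opaque, 0, half);
        if (ret == 0) {
            ret = backend_unmap(opaque, half, half);
        }
        return ret;
    }

In the patch below, vfio_legacy_dma_unmap_one() and
iommufd_backend_unmap_dma() play the role of backend_unmap above.
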
Signed-off-by: John Levon <john.levon@nutanix.com>
---
hw/vfio/container.c | 41 +++++++++++++++++++++++++++++++----------
hw/vfio/iommufd.c | 15 ++++++++++++++-
hw/vfio/listener.c | 19 ++++++-------------
3 files changed, 51 insertions(+), 24 deletions(-)
diff --git a/hw/vfio/container.c b/hw/vfio/container.c
index d5f4e66f1c..a9f0dbaec4 100644
--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -119,12 +119,9 @@ unmap_exit:
return ret;
}
-/*
- * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
- */
-static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
- hwaddr iova, ram_addr_t size,
- IOMMUTLBEntry *iotlb, bool unmap_all)
+static int vfio_legacy_dma_unmap_one(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb)
{
const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
bcontainer);
@@ -138,10 +135,6 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
int ret;
Error *local_err = NULL;
- if (unmap_all) {
- return -ENOTSUP;
- }
-
if (iotlb && vfio_container_dirty_tracking_is_started(bcontainer)) {
if (!vfio_container_devices_dirty_tracking_is_supported(bcontainer) &&
bcontainer->dirty_pages_supported) {
@@ -185,6 +178,34 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
return 0;
}
+/*
+ * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
+ */
+static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb, bool unmap_all)
+{
+ int ret;
+
+ if (unmap_all) {
+ /* The unmap ioctl doesn't accept a full 64-bit span. */
+ Int128 llsize = int128_rshift(int128_2_64(), 1);
+
+ ret = vfio_legacy_dma_unmap_one(bcontainer, 0, int128_get64(llsize),
+ iotlb);
+
+ if (ret == 0) {
+ ret = vfio_legacy_dma_unmap_one(bcontainer, int128_get64(llsize),
+ int128_get64(llsize), iotlb);
+ }
+
+ } else {
+ ret = vfio_legacy_dma_unmap_one(bcontainer, iova, size, iotlb);
+ }
+
+ return ret;
+}
+
static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
ram_addr_t size, void *vaddr, bool readonly)
{
diff --git a/hw/vfio/iommufd.c b/hw/vfio/iommufd.c
index 6b2764c044..af1c7ab10a 100644
--- a/hw/vfio/iommufd.c
+++ b/hw/vfio/iommufd.c
@@ -51,8 +51,21 @@ static int iommufd_cdev_unmap(const VFIOContainerBase *bcontainer,
const VFIOIOMMUFDContainer *container =
container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);
+ /* unmap in halves */
if (unmap_all) {
- return -ENOTSUP;
+ Int128 llsize = int128_rshift(int128_2_64(), 1);
+ int ret;
+
+ ret = iommufd_backend_unmap_dma(container->be, container->ioas_id,
+ 0, int128_get64(llsize));
+
+ if (ret == 0) {
+ ret = iommufd_backend_unmap_dma(container->be, container->ioas_id,
+ int128_get64(llsize),
+ int128_get64(llsize));
+ }
+
+ return ret;
}
/* TODO: Handle dma_unmap_bitmap with iotlb args (migration) */
diff --git a/hw/vfio/listener.c b/hw/vfio/listener.c
index c5183700db..e7ade7d62e 100644
--- a/hw/vfio/listener.c
+++ b/hw/vfio/listener.c
@@ -634,21 +634,14 @@ static void vfio_listener_region_del(MemoryListener *listener,
}
if (try_unmap) {
+ bool unmap_all = false;
+
if (int128_eq(llsize, int128_2_64())) {
- /* The unmap ioctl doesn't accept a full 64-bit span. */
- llsize = int128_rshift(llsize, 1);
- ret = vfio_container_dma_unmap(bcontainer, iova,
- int128_get64(llsize), NULL, false);
- if (ret) {
- error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx") = %d (%s)",
- bcontainer, iova, int128_get64(llsize), ret,
- strerror(-ret));
- }
- iova += int128_get64(llsize);
+ unmap_all = true;
+ llsize = int128_zero();
}
- ret = vfio_container_dma_unmap(bcontainer, iova,
- int128_get64(llsize), NULL, false);
+ ret = vfio_container_dma_unmap(bcontainer, iova, int128_get64(llsize),
+ NULL, unmap_all);
if (ret) {
error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
"0x%"HWADDR_PRIx") = %d (%s)",
--
2.43.0