From: "Cédric Le Goater" <clg@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Alex Williamson" <alex.williamson@redhat.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Cédric Le Goater" <clg@redhat.com>
Subject: [PULL 4/5] hw/vfio: Avoid ram_addr_t in vfio_container_query_dirty_bitmap()
Date: Fri,  3 Oct 2025 12:33:27 +0200
Message-ID: <20251003103328.1864910-5-clg@redhat.com>
In-Reply-To: <20251003103328.1864910-1-clg@redhat.com>

From: Philippe Mathieu-Daudé <philmd@linaro.org>

The 'ram_addr_t' type is described as:
  a QEMU internal address space that maps guest RAM physical
  addresses into an intermediate address space that can map
  to host virtual address spaces.
vfio_container_query_dirty_bitmap() doesn't expect such a QEMU
intermediate address, but a guest physical address. Use the
appropriate 'hwaddr' type and rename the parameter to
@translated_addr for clarity.
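
For reference, both types are plain integer typedefs in QEMU's
headers, so mixing them up compiles silently; the distinction is
purely semantic. A minimal sketch of the definitions (paraphrased
from include/exec/hwaddr.h and include/exec/cpu-common.h; the
exact guards may vary between QEMU versions and build
configurations):

  /* include/exec/hwaddr.h: a guest physical address */
  typedef uint64_t hwaddr;

  /* include/exec/cpu-common.h: an offset into QEMU's internal
   * RAM address space; its width depends on the build */
  #if defined(CONFIG_XEN_BACKEND)
  typedef uint64_t ram_addr_t;
  #else
  typedef uintptr_t ram_addr_t;
  #endif

Passing a guest physical address through a 'ram_addr_t' parameter
therefore works by accident but conflates two distinct address
spaces.
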
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Link: https://lore.kernel.org/qemu-devel/20250930123528.42878-4-philmd@linaro.org
Signed-off-by: Cédric Le Goater <clg@redhat.com>
---
 include/hw/vfio/vfio-container.h |  3 ++-
 hw/vfio/container.c              | 11 ++++++-----
 hw/vfio/listener.c               | 18 +++++++++---------
 hw/vfio/trace-events             |  2 +-
 4 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/include/hw/vfio/vfio-container.h b/include/hw/vfio/vfio-container.h
index b8fb2b8b5d72b1d2a4c00dc89b214b48a371f555..093c360f0eef5547d493525df64d486475d6680b 100644
--- a/include/hw/vfio/vfio-container.h
+++ b/include/hw/vfio/vfio-container.h
@@ -98,7 +98,8 @@ bool vfio_container_dirty_tracking_is_started(
 bool vfio_container_devices_dirty_tracking_is_supported(
     const VFIOContainer *bcontainer);
 int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
-    uint64_t iova, uint64_t size, ram_addr_t ram_addr, Error **errp);
+                                      uint64_t iova, uint64_t size,
+                                      hwaddr translated_addr, Error **errp);
 
 GList *vfio_container_get_iova_ranges(const VFIOContainer *bcontainer);
 
diff --git a/hw/vfio/container.c b/hw/vfio/container.c
index 250b20f424522f4b6a4e864906eed2d8d13efbcd..9d69439371402940fcbc926737215eb9308b237a 100644
--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -246,7 +246,7 @@ static int vfio_container_devices_query_dirty_bitmap(
 
 int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
                                       uint64_t iova, uint64_t size,
-                                      ram_addr_t ram_addr, Error **errp)
+                                      hwaddr translated_addr, Error **errp)
 {
     bool all_device_dirty_tracking =
         vfio_container_devices_dirty_tracking_is_supported(bcontainer);
@@ -255,7 +255,7 @@ int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
     int ret;
 
     if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) {
-        cpu_physical_memory_set_dirty_range(ram_addr, size,
+        cpu_physical_memory_set_dirty_range(translated_addr, size,
                                             tcg_enabled() ? DIRTY_CLIENTS_ALL :
                                             DIRTY_CLIENTS_NOCODE);
         return 0;
@@ -280,11 +280,12 @@ int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
         goto out;
     }
 
-    dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
+    dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
+                                                         translated_addr,
                                                          vbmap.pages);
 
-    trace_vfio_container_query_dirty_bitmap(iova, size, vbmap.size, ram_addr,
-                                            dirty_pages);
+    trace_vfio_container_query_dirty_bitmap(iova, size, vbmap.size,
+                                            translated_addr, dirty_pages);
 out:
     g_free(vbmap.bitmap);
 
diff --git a/hw/vfio/listener.c b/hw/vfio/listener.c
index 3b6f17f0c3aa7ef08091f8cc1c3230eff97b5cd7..a2c19a3cec1a923631fe5c31d084cd7615f888c0 100644
--- a/hw/vfio/listener.c
+++ b/hw/vfio/listener.c
@@ -1059,7 +1059,7 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     VFIOGuestIOMMU *giommu = gdn->giommu;
     VFIOContainer *bcontainer = giommu->bcontainer;
     hwaddr iova = iotlb->iova + giommu->iommu_offset;
-    ram_addr_t translated_addr;
+    hwaddr translated_addr;
     Error *local_err = NULL;
     int ret = -EINVAL;
     MemoryRegion *mr;
@@ -1108,8 +1108,8 @@ static int vfio_ram_discard_query_dirty_bitmap(MemoryRegionSection *section,
 {
     const hwaddr size = int128_get64(section->size);
     const hwaddr iova = section->offset_within_address_space;
-    const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) +
-                                section->offset_within_region;
+    const hwaddr translated_addr = memory_region_get_ram_addr(section->mr) +
+                                   section->offset_within_region;
     VFIORamDiscardListener *vrdl = opaque;
     Error *local_err = NULL;
     int ret;
@@ -1118,8 +1118,8 @@ static int vfio_ram_discard_query_dirty_bitmap(MemoryRegionSection *section,
      * Sync the whole mapped region (spanning multiple individual mappings)
      * in one go.
      */
-    ret = vfio_container_query_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr,
-                                &local_err);
+    ret = vfio_container_query_dirty_bitmap(vrdl->bcontainer, iova, size,
+                                            translated_addr, &local_err);
     if (ret) {
         error_report_err(local_err);
     }
@@ -1183,7 +1183,7 @@ static int vfio_sync_iommu_dirty_bitmap(VFIOContainer *bcontainer,
 static int vfio_sync_dirty_bitmap(VFIOContainer *bcontainer,
                                   MemoryRegionSection *section, Error **errp)
 {
-    ram_addr_t ram_addr;
+    hwaddr translated_addr;
 
     if (memory_region_is_iommu(section->mr)) {
         return vfio_sync_iommu_dirty_bitmap(bcontainer, section);
@@ -1198,12 +1198,12 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *bcontainer,
         return ret;
     }
 
-    ram_addr = memory_region_get_ram_addr(section->mr) +
-               section->offset_within_region;
+    translated_addr = memory_region_get_ram_addr(section->mr) +
+                      section->offset_within_region;
 
     return vfio_container_query_dirty_bitmap(bcontainer,
                    REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
-                                 int128_get64(section->size), ram_addr, errp);
+                   int128_get64(section->size), translated_addr, errp);
 }
 
 static void vfio_listener_log_sync(MemoryListener *listener,
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index b1b470cc295ecd07db60910843d9fe869ab7b924..1e895448cd9b4f6b0dd5bb445de4acef6090ec62 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -105,7 +105,7 @@ vfio_device_dirty_tracking_start(int nr_ranges, uint64_t min32, uint64_t max32,
 vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64
 
 # container.c
-vfio_container_query_dirty_bitmap(uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64
+vfio_container_query_dirty_bitmap(uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t translated_addr, uint64_t dirty_pages) "iova=0x%"PRIx64" size=0x%"PRIx64" bitmap_size=0x%"PRIx64" gpa=0x%"PRIx64" dirty_pages=%"PRIu64
 
 # container-legacy.c
 vfio_container_disconnect(int fd) "close container->fd=%d"
-- 
2.51.0