From: John Levon <john.levon@nutanix.com>
To: qemu-devel@nongnu.org
Cc: "Marc-André Lureau" <marcandre.lureau@redhat.com>,
	"Thanos Makatos" <thanos.makatos@nutanix.com>,
	"Daniel P. Berrangé" <berrange@redhat.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Peter Xu" <peterx@redhat.com>,
	"David Hildenbrand" <david@redhat.com>,
	"Cédric Le Goater" <clg@redhat.com>,
	"Stefano Garzarella" <sgarzare@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Alex Williamson" <alex.williamson@redhat.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"John Levon" <john.levon@nutanix.com>,
	"Steve Sistare" <steven.sistare@oracle.com>
Subject: [PATCH 01/27] vfio: return mr from vfio_get_xlat_addr
Date: Thu, 15 May 2025 16:43:46 +0100
Message-ID: <20250515154413.210315-2-john.levon@nutanix.com>
In-Reply-To: <20250515154413.210315-1-john.levon@nutanix.com>

From: Steve Sistare <steven.sistare@oracle.com>

Modify memory_get_xlat_addr and vfio_get_xlat_addr to return the memory
region that the translated address is found in.  This will be needed by
CPR in a subsequent patch to map blocks using IOMMU_IOAS_MAP_FILE.

Also return the xlat offset, so we can simplify the interface by removing
the out parameters that can be trivially derived from mr and xlat.
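
For illustration, the values carried by the old out parameters can be
derived from the returned pair, as the updated callers do (a minimal
sketch of that pattern, taken from the hunks below; error handling
elided):

    MemoryRegion *mr;
    hwaddr xlat;
    Error *local_err = NULL;

    if (memory_get_xlat_addr(iotlb, &mr, &xlat, &local_err)) {
        void *vaddr = memory_region_get_ram_ptr(mr) + xlat;
        ram_addr_t ram_addr = memory_region_get_ram_addr(mr) + xlat;
        bool read_only = !(iotlb->perm & IOMMU_WO) || mr->readonly;
        /* use vaddr/ram_addr/read_only as before */
    }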

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
---
 include/system/memory.h | 16 +++++++---------
 hw/vfio/listener.c      | 29 +++++++++++++++++++----------
 hw/virtio/vhost-vdpa.c  |  8 ++++++--
 system/memory.c         | 25 ++++---------------------
 4 files changed, 36 insertions(+), 42 deletions(-)
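
Note the lifetime rule this introduces: the returned MemoryRegion, and
anything derived from it, is only valid while the RCU read lock is held,
so callers follow the pattern below (a sketch of the intended usage with
declarations as in the previous sketch; consume_translation() is a
hypothetical consumer):

    rcu_read_lock();
    if (!memory_get_xlat_addr(iotlb, &mr, &xlat, &local_err)) {
        error_report_err(local_err);
    } else {
        /* mr must not be used after rcu_read_unlock() */
        consume_translation(mr, xlat);  /* hypothetical consumer */
    }
    rcu_read_unlock();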

diff --git a/include/system/memory.h b/include/system/memory.h
index fbbf4cf911..d74321411b 100644
--- a/include/system/memory.h
+++ b/include/system/memory.h
@@ -738,21 +738,19 @@ void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                              RamDiscardListener *rdl);
 
 /**
- * memory_get_xlat_addr: Extract addresses from a TLB entry
+ * memory_get_xlat_addr: Extract addresses from a TLB entry.
+ *                       Called with rcu_read_lock held.
  *
  * @iotlb: pointer to an #IOMMUTLBEntry
- * @vaddr: virtual address
- * @ram_addr: RAM address
- * @read_only: indicates if writes are allowed
- * @mr_has_discard_manager: indicates memory is controlled by a
- *                          RamDiscardManager
+ * @mr_p: return the MemoryRegion containing the @iotlb translated address.
+ *        The MemoryRegion must not be accessed after rcu_read_unlock.
+ * @xlat_p: return the offset of the translated address from the start of @mr_p
  * @errp: pointer to Error*, to store an error if it happens.
  *
  * Return: true on success, else false setting @errp with error.
  */
-bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
-                          ram_addr_t *ram_addr, bool *read_only,
-                          bool *mr_has_discard_manager, Error **errp);
+bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, MemoryRegion **mr_p,
+                          hwaddr *xlat_p, Error **errp);
 
 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
diff --git a/hw/vfio/listener.c b/hw/vfio/listener.c
index bfacb3d8d9..0da0b2e32a 100644
--- a/hw/vfio/listener.c
+++ b/hw/vfio/listener.c
@@ -90,16 +90,17 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
            section->offset_within_address_space & (1ULL << 63);
 }
 
-/* Called with rcu_read_lock held.  */
-static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
-                               ram_addr_t *ram_addr, bool *read_only,
-                               Error **errp)
+/*
+ * Called with rcu_read_lock held.
+ * The returned MemoryRegion must not be accessed after calling rcu_read_unlock.
+ */
+static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, MemoryRegion **mr_p,
+                               hwaddr *xlat_p, Error **errp)
 {
-    bool ret, mr_has_discard_manager;
+    bool ret;
 
-    ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only,
-                               &mr_has_discard_manager, errp);
-    if (ret && mr_has_discard_manager) {
+    ret = memory_get_xlat_addr(iotlb, mr_p, xlat_p, errp);
+    if (ret && memory_region_has_ram_discard_manager(*mr_p)) {
         /*
          * Malicious VMs might trigger discarding of IOMMU-mapped memory. The
          * pages will remain pinned inside vfio until unmapped, resulting in a
@@ -126,6 +127,8 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
     VFIOContainerBase *bcontainer = giommu->bcontainer;
     hwaddr iova = iotlb->iova + giommu->iommu_offset;
+    MemoryRegion *mr;
+    hwaddr xlat;
     void *vaddr;
     int ret;
     Error *local_err = NULL;
@@ -150,10 +153,13 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
         bool read_only;
 
-        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, &local_err)) {
+        if (!vfio_get_xlat_addr(iotlb, &mr, &xlat, &local_err)) {
             error_report_err(local_err);
             goto out;
         }
+        vaddr = memory_region_get_ram_ptr(mr) + xlat;
+        read_only = !(iotlb->perm & IOMMU_WO) || mr->readonly;
+
         /*
          * vaddr is only valid until rcu_read_unlock(). But after
          * vfio_dma_map has set up the mapping the pages will be
@@ -1010,6 +1016,8 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     ram_addr_t translated_addr;
     Error *local_err = NULL;
     int ret = -EINVAL;
+    MemoryRegion *mr;
+    hwaddr xlat;
 
     trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
 
@@ -1021,9 +1029,10 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     }
 
     rcu_read_lock();
-    if (!vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL, &local_err)) {
+    if (!vfio_get_xlat_addr(iotlb, &mr, &xlat, &local_err)) {
         goto out_unlock;
     }
+    translated_addr = memory_region_get_ram_addr(mr) + xlat;
 
     ret = vfio_container_query_dirty_bitmap(bcontainer, iova, iotlb->addr_mask + 1,
                                 translated_addr, &local_err);
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 1ab2c11fa8..f19136070e 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -209,6 +209,8 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     int ret;
     Int128 llend;
     Error *local_err = NULL;
+    MemoryRegion *mr;
+    hwaddr xlat;
 
     if (iotlb->target_as != &address_space_memory) {
         error_report("Wrong target AS \"%s\", only system memory is allowed",
@@ -228,11 +230,13 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
         bool read_only;
 
-        if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL,
-                                  &local_err)) {
+        if (!memory_get_xlat_addr(iotlb, &mr, &xlat, &local_err)) {
             error_report_err(local_err);
             return;
         }
+        vaddr = memory_region_get_ram_ptr(mr) + xlat;
+        read_only = !(iotlb->perm & IOMMU_WO) || mr->readonly;
+
         ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
                                  iotlb->addr_mask + 1, vaddr, read_only);
         if (ret) {
diff --git a/system/memory.c b/system/memory.c
index 63b983efcd..4894c0d8c1 100644
--- a/system/memory.c
+++ b/system/memory.c
@@ -2174,18 +2174,14 @@ void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
 }
 
 /* Called with rcu_read_lock held.  */
-bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
-                          ram_addr_t *ram_addr, bool *read_only,
-                          bool *mr_has_discard_manager, Error **errp)
+bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, MemoryRegion **mr_p,
+                          hwaddr *xlat_p, Error **errp)
 {
     MemoryRegion *mr;
     hwaddr xlat;
     hwaddr len = iotlb->addr_mask + 1;
     bool writable = iotlb->perm & IOMMU_WO;
 
-    if (mr_has_discard_manager) {
-        *mr_has_discard_manager = false;
-    }
     /*
      * The IOMMU TLB entry we have just covers translation through
      * this IOMMU to its immediate target.  We need to translate
@@ -2203,9 +2199,6 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
             .offset_within_region = xlat,
             .size = int128_make64(len),
         };
-        if (mr_has_discard_manager) {
-            *mr_has_discard_manager = true;
-        }
         /*
          * Malicious VMs can map memory into the IOMMU, which is expected
          * to remain discarded. vfio will pin all pages, populating memory.
@@ -2229,18 +2222,8 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
         return false;
     }
 
-    if (vaddr) {
-        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
-    }
-
-    if (ram_addr) {
-        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
-    }
-
-    if (read_only) {
-        *read_only = !writable || mr->readonly;
-    }
-
+    *xlat_p = xlat;
+    *mr_p = mr;
     return true;
 }
 
-- 
2.43.0




Thread overview: 39+ messages
2025-05-15 15:43 [PATCH 00/27] vfio-user client John Levon
2025-05-15 15:43 ` John Levon [this message]
2025-05-15 15:46   ` [PATCH 01/27] vfio: return mr from vfio_get_xlat_addr John Levon
2025-05-15 15:43 ` [PATCH 02/27] vfio/container: pass MemoryRegion to DMA operations John Levon
2025-05-16 15:11   ` Cédric Le Goater
2025-05-18 17:00     ` John Levon
2025-05-19  8:17       ` Cédric Le Goater
2025-05-15 15:43 ` [PATCH 03/27] vfio: move more cleanup into vfio_pci_put_device() John Levon
2025-05-16 15:21   ` Cédric Le Goater
2025-05-15 15:43 ` [PATCH 04/27] vfio: move config space read into vfio_pci_config_setup() John Levon
2025-05-16 15:26   ` Cédric Le Goater
2025-05-15 15:43 ` [PATCH 05/27] vfio: refactor out IRQ signalling setup John Levon
2025-05-16 15:27   ` Cédric Le Goater
2025-05-15 15:43 ` [PATCH 06/27] vfio: enable per-IRQ MSI-X masking John Levon
2025-05-15 15:43 ` [PATCH 07/27] vfio: add per-region fd support John Levon
2025-05-15 15:43 ` [PATCH 08/27] vfio: mark posted writes in region write callbacks John Levon
2025-05-15 15:43 ` [PATCH 09/27] vfio-user: introduce vfio-user protocol specification John Levon
2025-05-15 15:43 ` [PATCH 10/27] vfio-user: add vfio-user class and container John Levon
2025-05-15 15:43 ` [PATCH 11/27] vfio-user: connect vfio proxy to remote server John Levon
2025-05-15 15:43 ` [PATCH 12/27] vfio-user: implement message receive infrastructure John Levon
2025-05-15 15:43 ` [PATCH 13/27] vfio-user: implement message send infrastructure John Levon
2025-05-15 15:43 ` [PATCH 14/27] vfio-user: implement VFIO_USER_DEVICE_GET_INFO John Levon
2025-05-15 15:44 ` [PATCH 15/27] vfio-user: implement VFIO_USER_DEVICE_GET_REGION_INFO John Levon
2025-05-15 15:44 ` [PATCH 16/27] vfio-user: implement VFIO_USER_REGION_READ/WRITE John Levon
2025-05-15 15:44 ` [PATCH 17/27] vfio-user: set up PCI in vfio_user_pci_realize() John Levon
2025-05-15 15:44 ` [PATCH 18/27] vfio-user: implement VFIO_USER_DEVICE_GET/SET_IRQ* John Levon
2025-05-15 15:44 ` [PATCH 19/27] vfio-user: forward MSI-X PBA BAR accesses to server John Levon
2025-05-15 15:44 ` [PATCH 20/27] vfio-user: set up container access to the proxy John Levon
2025-05-15 15:44 ` [PATCH 21/27] vfio-user: implement VFIO_USER_DEVICE_RESET John Levon
2025-05-15 15:44 ` [PATCH 22/27] vfio-user: implement VFIO_USER_DMA_MAP/UNMAP John Levon
2025-05-15 15:44 ` [PATCH 23/27] vfio-user: implement VFIO_USER_DMA_READ/WRITE John Levon
2025-05-15 15:44 ` [PATCH 24/27] vfio-user: add 'x-msg-timeout' option John Levon
2025-05-15 15:44 ` [PATCH 25/27] vfio-user: support posted writes John Levon
2025-05-15 15:44 ` [PATCH 26/27] vfio-user: add coalesced posted writes John Levon
2025-05-15 15:44 ` [PATCH 27/27] docs: add vfio-user documentation John Levon
2025-05-19 12:40 ` [PATCH 00/27] vfio-user client Cédric Le Goater
2025-05-19 13:29   ` John Levon
2025-05-20  5:59     ` Cédric Le Goater
2025-05-20 15:05       ` John Levon
