From: David Hildenbrand <david@redhat.com>
To: qemu-devel@nongnu.org
Cc: David Hildenbrand <david@redhat.com>,
	"Michael S . Tsirkin" <mst@redhat.com>,
	Jason Wang <jasowang@redhat.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Stefano Garzarella <sgarzare@redhat.com>,
	Germano Veit Michel <germano@redhat.com>,
	Raphael Norwitz <raphael.norwitz@nutanix.com>
Subject: [PATCH v1 05/15] libvhost-user: Merge vu_set_mem_table_exec_postcopy() into vu_set_mem_table_exec()
Date: Fri,  2 Feb 2024 22:53:22 +0100
Message-ID: <20240202215332.118728-6-david@redhat.com>
In-Reply-To: <20240202215332.118728-1-david@redhat.com>

Let's reduce some code duplication and prepare for further changes.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
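The merged function keeps a single mmap loop: while dev->postcopy_listening is
set, each region is mapped PROT_NONE (so any access faults until the userfault
handlers are armed) and its mmap address is reported back to QEMU via
msg_region->userspace_addr; once QEMU acks the reply, generate_faults()
registers the regions. Without postcopy listening, regions are mapped
PROT_READ | PROT_WRITE and the rings are remapped as before.
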
 subprojects/libvhost-user/libvhost-user.c | 119 +++++++---------------
 1 file changed, 39 insertions(+), 80 deletions(-)

diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index d5b3468e43..d9e2214ad2 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -937,95 +937,23 @@ vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg)
 }
 
 static bool
-vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
+vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
 {
-    unsigned int i;
     VhostUserMemory m = vmsg->payload.memory, *memory = &m;
-    dev->nregions = memory->nregions;
-
-    DPRINT("Nregions: %u\n", memory->nregions);
-    for (i = 0; i < dev->nregions; i++) {
-        void *mmap_addr;
-        VhostUserMemoryRegion *msg_region = &memory->regions[i];
-        VuDevRegion *dev_region = &dev->regions[i];
-
-        DPRINT("Region %d\n", i);
-        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
-               msg_region->guest_phys_addr);
-        DPRINT("    memory_size:     0x%016"PRIx64"\n",
-               msg_region->memory_size);
-        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
-               msg_region->userspace_addr);
-        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
-               msg_region->mmap_offset);
-
-        dev_region->gpa = msg_region->guest_phys_addr;
-        dev_region->size = msg_region->memory_size;
-        dev_region->qva = msg_region->userspace_addr;
-        dev_region->mmap_offset = msg_region->mmap_offset;
+    int prot = PROT_READ | PROT_WRITE;
+    unsigned int i;
 
-        /* We don't use offset argument of mmap() since the
-         * mapped address has to be page aligned, and we use huge
-         * pages.
+    if (dev->postcopy_listening) {
+        /*
          * In postcopy we're using PROT_NONE here to catch anyone
          * accessing it before we userfault
          */
-        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
-                         PROT_NONE, MAP_SHARED | MAP_NORESERVE,
-                         vmsg->fds[i], 0);
-
-        if (mmap_addr == MAP_FAILED) {
-            vu_panic(dev, "region mmap error: %s", strerror(errno));
-        } else {
-            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
-            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
-                   dev_region->mmap_addr);
-        }
-
-        /* Return the address to QEMU so that it can translate the ufd
-         * fault addresses back.
-         */
-        msg_region->userspace_addr = dev_region->mmap_addr +
-                                     dev_region->mmap_offset;
-        close(vmsg->fds[i]);
-    }
-
-    /* Send the message back to qemu with the addresses filled in */
-    vmsg->fd_num = 0;
-    if (!vu_send_reply(dev, dev->sock, vmsg)) {
-        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
-        return false;
-    }
-
-    /* Wait for QEMU to confirm that it's registered the handler for the
-     * faults.
-     */
-    if (!dev->read_msg(dev, dev->sock, vmsg) ||
-        vmsg->size != sizeof(vmsg->payload.u64) ||
-        vmsg->payload.u64 != 0) {
-        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
-        return false;
+        prot = PROT_NONE;
     }
 
-    /* OK, now we can go and register the memory and generate faults */
-    (void)generate_faults(dev);
-
-    return false;
-}
-
-static bool
-vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
-{
-    unsigned int i;
-    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
-
     vu_remove_all_mem_regs(dev);
     dev->nregions = memory->nregions;
 
-    if (dev->postcopy_listening) {
-        return vu_set_mem_table_exec_postcopy(dev, vmsg);
-    }
-
     DPRINT("Nregions: %u\n", memory->nregions);
     for (i = 0; i < dev->nregions; i++) {
         void *mmap_addr;
@@ -1051,8 +979,7 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
          * mapped address has to be page aligned, and we use huge
          * pages.  */
         mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
-                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE,
-                         vmsg->fds[i], 0);
+                         prot, MAP_SHARED | MAP_NORESERVE, vmsg->fds[i], 0);
 
         if (mmap_addr == MAP_FAILED) {
             vu_panic(dev, "region mmap error: %s", strerror(errno));
@@ -1062,9 +989,41 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
                    dev_region->mmap_addr);
         }
 
+        if (dev->postcopy_listening) {
+            /*
+             * Return the address to QEMU so that it can translate the ufd
+             * fault addresses back.
+             */
+            msg_region->userspace_addr = dev_region->mmap_addr +
+                                         dev_region->mmap_offset;
+        }
         close(vmsg->fds[i]);
     }
 
+    if (dev->postcopy_listening) {
+        /* Send the message back to qemu with the addresses filled in */
+        vmsg->fd_num = 0;
+        if (!vu_send_reply(dev, dev->sock, vmsg)) {
+            vu_panic(dev, "failed to respond to set-mem-table for postcopy");
+            return false;
+        }
+
+        /*
+         * Wait for QEMU to confirm that it's registered the handler for the
+         * faults.
+         */
+        if (!dev->read_msg(dev, dev->sock, vmsg) ||
+            vmsg->size != sizeof(vmsg->payload.u64) ||
+            vmsg->payload.u64 != 0) {
+            vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
+            return false;
+        }
+
+        /* OK, now we can go and register the memory and generate faults */
+        (void)generate_faults(dev);
+        return false;
+    }
+
     for (i = 0; i < dev->max_queues; i++) {
         if (dev->vq[i].vring.desc) {
             if (map_ring(dev, &dev->vq[i])) {
-- 
2.43.0
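
Background, for readers unfamiliar with the postcopy mechanism the diff relies
on: generate_faults() arms userfaultfd on the freshly mapped, PROT_NONE regions
so that missing pages can be serviced during postcopy migration. A minimal,
self-contained sketch of that registration step follows; the helper name
register_missing_faults() and its parameters are illustrative, not from
libvhost-user:

#include <fcntl.h>
#include <stdint.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/*
 * Register [addr, addr + len) for missing-page faults. The returned fd
 * is polled by whoever resolves the faults (QEMU, in the vhost-user
 * postcopy case) and pages are filled in with UFFDIO_COPY.
 */
static int register_missing_faults(void *addr, uint64_t len)
{
    struct uffdio_api api = { .api = UFFD_API, .features = 0 };
    struct uffdio_register reg = {
        .range = { .start = (uintptr_t)addr, .len = len },
        .mode = UFFDIO_REGISTER_MODE_MISSING,
    };
    int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

    if (uffd < 0) {
        return -1;
    }
    if (ioctl(uffd, UFFDIO_API, &api) < 0 ||
        ioctl(uffd, UFFDIO_REGISTER, &reg) < 0) {
        close(uffd);
        return -1;
    }
    return uffd;
}

In libvhost-user itself the userfaultfd is created earlier in the postcopy
handshake and handed to QEMU; generate_faults() only performs the per-region
registration and then makes the regions accessible again. That is why the
PROT_NONE mapping is safe: nothing may touch the memory between mmap() and
registration.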


