From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
David Hildenbrand <david@redhat.com>,
Raphael Norwitz <raphael@enfabrica.net>,
Stefano Garzarella <sgarzare@redhat.com>,
Mario Casquero <mcasquer@redhat.com>
Subject: [PULL 16/68] libvhost-user: Factor out adding a memory region
Date: Tue, 12 Mar 2024 18:26:17 -0400
Message-ID: <93fec23d8cecebf0e6917044a0c1635df20e350d.1710282274.git.mst@redhat.com>
In-Reply-To: <cover.1710282274.git.mst@redhat.com>
From: David Hildenbrand <david@redhat.com>
Let's factor it out, reducing quite a bit of code duplication and preparing
for further changes.
If we fail to mmap a region and panic, we now simply don't add that
(broken) region.
Note that dev->nregions is now incremented only as memory regions are
successfully added, and is left unchanged if anything goes wrong.
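For illustration only (not part of the change itself): because dev->nregions
is bumped only on success, a caller could detect a failed add by comparing the
counter around the call. A minimal sketch, with names following the diff below:

    /* Hypothetical caller-side check; _vu_add_mem_reg() itself returns void. */
    unsigned old_nregions = dev->nregions;

    _vu_add_mem_reg(dev, msg_region, fd);
    if (dev->nregions == old_nregions) {
        /* mmap failed, vu_panic() already ran, and nothing was added. */
        close(fd);
        return false;
    }

The patch itself does not need such a check, since vu_panic() already marks
the device as broken.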
Reviewed-by: Raphael Norwitz <raphael@enfabrica.net>
Acked-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20240214151701.29906-6-david@redhat.com>
Tested-by: Mario Casquero <mcasquer@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
subprojects/libvhost-user/libvhost-user.c | 168 ++++++++--------------
1 file changed, 60 insertions(+), 108 deletions(-)
diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index a7bd7de3cd..f43b5096d0 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -256,6 +256,61 @@ vu_remove_all_mem_regs(VuDev *dev)
dev->nregions = 0;
}
+static void
+_vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
+{
+ int prot = PROT_READ | PROT_WRITE;
+ VuDevRegion *r;
+ void *mmap_addr;
+
+ DPRINT("Adding region %d\n", dev->nregions);
+ DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
+ msg_region->guest_phys_addr);
+ DPRINT(" memory_size: 0x%016"PRIx64"\n",
+ msg_region->memory_size);
+ DPRINT(" userspace_addr: 0x%016"PRIx64"\n",
+ msg_region->userspace_addr);
+ DPRINT(" mmap_offset: 0x%016"PRIx64"\n",
+ msg_region->mmap_offset);
+
+ if (dev->postcopy_listening) {
+ /*
+ * In postcopy we're using PROT_NONE here to catch anyone
+ * accessing it before we userfault
+ */
+ prot = PROT_NONE;
+ }
+
+ /*
+ * We don't use offset argument of mmap() since the mapped address has
+ * to be page aligned, and we use huge pages.
+ */
+ mmap_addr = mmap(0, msg_region->memory_size + msg_region->mmap_offset,
+ prot, MAP_SHARED | MAP_NORESERVE, fd, 0);
+ if (mmap_addr == MAP_FAILED) {
+ vu_panic(dev, "region mmap error: %s", strerror(errno));
+ return;
+ }
+ DPRINT(" mmap_addr: 0x%016"PRIx64"\n",
+ (uint64_t)(uintptr_t)mmap_addr);
+
+ r = &dev->regions[dev->nregions];
+ r->gpa = msg_region->guest_phys_addr;
+ r->size = msg_region->memory_size;
+ r->qva = msg_region->userspace_addr;
+ r->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
+ r->mmap_offset = msg_region->mmap_offset;
+ dev->nregions++;
+
+ if (dev->postcopy_listening) {
+ /*
+ * Return the address to QEMU so that it can translate the ufd
+ * fault addresses back.
+ */
+ msg_region->userspace_addr = r->mmap_addr + r->mmap_offset;
+ }
+}
+
static void
vmsg_close_fds(VhostUserMsg *vmsg)
{
@@ -727,10 +782,7 @@ generate_faults(VuDev *dev) {
static bool
vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
int i;
- bool track_ramblocks = dev->postcopy_listening;
VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
- VuDevRegion *dev_region = &dev->regions[dev->nregions];
- void *mmap_addr;
if (vmsg->fd_num != 1) {
vmsg_close_fds(vmsg);
@@ -760,69 +812,20 @@ vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
* we know all the postcopy client bases have been received, and we
* should start generating faults.
*/
- if (track_ramblocks &&
+ if (dev->postcopy_listening &&
vmsg->size == sizeof(vmsg->payload.u64) &&
vmsg->payload.u64 == 0) {
(void)generate_faults(dev);
return false;
}
- DPRINT("Adding region: %u\n", dev->nregions);
- DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
- msg_region->guest_phys_addr);
- DPRINT(" memory_size: 0x%016"PRIx64"\n",
- msg_region->memory_size);
- DPRINT(" userspace_addr 0x%016"PRIx64"\n",
- msg_region->userspace_addr);
- DPRINT(" mmap_offset 0x%016"PRIx64"\n",
- msg_region->mmap_offset);
-
- dev_region->gpa = msg_region->guest_phys_addr;
- dev_region->size = msg_region->memory_size;
- dev_region->qva = msg_region->userspace_addr;
- dev_region->mmap_offset = msg_region->mmap_offset;
-
- /*
- * We don't use offset argument of mmap() since the
- * mapped address has to be page aligned, and we use huge
- * pages.
- */
- if (track_ramblocks) {
- /*
- * In postcopy we're using PROT_NONE here to catch anyone
- * accessing it before we userfault.
- */
- mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
- PROT_NONE, MAP_SHARED | MAP_NORESERVE,
- vmsg->fds[0], 0);
- } else {
- mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE,
- vmsg->fds[0], 0);
- }
-
- if (mmap_addr == MAP_FAILED) {
- vu_panic(dev, "region mmap error: %s", strerror(errno));
- } else {
- dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
- DPRINT(" mmap_addr: 0x%016"PRIx64"\n",
- dev_region->mmap_addr);
- }
-
+ _vu_add_mem_reg(dev, msg_region, vmsg->fds[0]);
close(vmsg->fds[0]);
- if (track_ramblocks) {
- /*
- * Return the address to QEMU so that it can translate the ufd
- * fault addresses back.
- */
- msg_region->userspace_addr = (uintptr_t)(mmap_addr +
- dev_region->mmap_offset);
-
+ if (dev->postcopy_listening) {
/* Send the message back to qemu with the addresses filled in. */
vmsg->fd_num = 0;
DPRINT("Successfully added new region in postcopy\n");
- dev->nregions++;
return true;
} else {
for (i = 0; i < dev->max_queues; i++) {
@@ -835,7 +838,6 @@ vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
}
DPRINT("Successfully added new region\n");
- dev->nregions++;
return false;
}
}
@@ -940,63 +942,13 @@ static bool
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
VhostUserMemory m = vmsg->payload.memory, *memory = &m;
- int prot = PROT_READ | PROT_WRITE;
unsigned int i;
- if (dev->postcopy_listening) {
- /*
- * In postcopy we're using PROT_NONE here to catch anyone
- * accessing it before we userfault
- */
- prot = PROT_NONE;
- }
-
vu_remove_all_mem_regs(dev);
- dev->nregions = memory->nregions;
DPRINT("Nregions: %u\n", memory->nregions);
- for (i = 0; i < dev->nregions; i++) {
- void *mmap_addr;
- VhostUserMemoryRegion *msg_region = &memory->regions[i];
- VuDevRegion *dev_region = &dev->regions[i];
-
- DPRINT("Region %d\n", i);
- DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
- msg_region->guest_phys_addr);
- DPRINT(" memory_size: 0x%016"PRIx64"\n",
- msg_region->memory_size);
- DPRINT(" userspace_addr 0x%016"PRIx64"\n",
- msg_region->userspace_addr);
- DPRINT(" mmap_offset 0x%016"PRIx64"\n",
- msg_region->mmap_offset);
-
- dev_region->gpa = msg_region->guest_phys_addr;
- dev_region->size = msg_region->memory_size;
- dev_region->qva = msg_region->userspace_addr;
- dev_region->mmap_offset = msg_region->mmap_offset;
-
- /* We don't use offset argument of mmap() since the
- * mapped address has to be page aligned, and we use huge
- * pages. */
- mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
- prot, MAP_SHARED | MAP_NORESERVE, vmsg->fds[i], 0);
-
- if (mmap_addr == MAP_FAILED) {
- vu_panic(dev, "region mmap error: %s", strerror(errno));
- } else {
- dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
- DPRINT(" mmap_addr: 0x%016"PRIx64"\n",
- dev_region->mmap_addr);
- }
-
- if (dev->postcopy_listening) {
- /*
- * Return the address to QEMU so that it can translate the ufd
- * fault addresses back.
- */
- msg_region->userspace_addr = (uintptr_t)(mmap_addr +
- dev_region->mmap_offset);
- }
+ for (i = 0; i < memory->nregions; i++) {
+ _vu_add_mem_reg(dev, &memory->regions[i], vmsg->fds[i]);
close(vmsg->fds[i]);
}
--
MST