From: Albert Esteve <aesteve@redhat.com>
To: qemu-devel@nongnu.org
Cc: stefanha@redhat.com, slp@redhat.com, david@redhat.com,
"Michael S. Tsirkin" <mst@redhat.com>,
"Stefano Garzarella" <sgarzare@redhat.com>,
jasowang@redhat.com, stevensd@chromium.org, hi@alyssa.is,
"Alex Bennée" <alex.bennee@linaro.org>,
"Albert Esteve" <aesteve@redhat.com>
Subject: [PATCH v5 7/7] vhost-user-device: Add shmem BAR
Date: Mon, 9 Jun 2025 16:47:29 +0200
Message-ID: <20250609144729.884027-8-aesteve@redhat.com>
In-Reply-To: <20250609144729.884027-1-aesteve@redhat.com>
Add a shmem BAR to the vhost-user-device, into which
files can be directly mapped.

The number, shmid, and size of the VIRTIO Shared Memory
subregions are retrieved through a get_shmem_config
message sent by the vhost-user-base module during the
realize step, after virtio_init().

By default, if the VHOST_USER_PROTOCOL_F_SHMEM feature
is not supported by the backend, there is no cache.

Signed-off-by: Albert Esteve <aesteve@redhat.com>
---
hw/virtio/vhost-user-base.c | 47 +++++++++++++++++++++++++++++--
hw/virtio/vhost-user-device-pci.c | 34 ++++++++++++++++++++--
2 files changed, 76 insertions(+), 5 deletions(-)
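
For context (not part of the patch itself): below is a minimal standalone
sketch of the shmem BAR layout that vhost_user_device_pci_realize() builds
from the per-shmid sizes reported through get_shmem_config. The sizes used
here are made-up example values, not output from any real backend.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Standalone sketch of the BAR layout computation done in
 * vhost_user_device_pci_realize(); the per-shmid sizes below are
 * hypothetical example values (shmid 1 reports no region).
 */
int main(void)
{
    uint64_t memory_sizes[] = { 0x200000, 0, 0x100000 };
    int nregions = 3;
    uint64_t offset = 0, shmem_size = 0;

    /* First pass: total size of the shmem BAR. */
    for (int i = 0; i < nregions; i++) {
        shmem_size += memory_sizes[i];
    }
    printf("shmem BAR size: 0x%" PRIx64 "\n", shmem_size);

    /*
     * Second pass: each non-empty region becomes a subregion at 'offset'
     * and a VIRTIO_PCI_CAP_SHARED_MEMORY_CFG capability pointing into
     * the shmem BAR.
     */
    for (int i = 0; i < nregions; i++) {
        if (!memory_sizes[i]) {
            continue;
        }
        printf("shmid %d -> offset 0x%" PRIx64 ", length 0x%" PRIx64 "\n",
               i, offset, memory_sizes[i]);
        offset += memory_sizes[i];
    }
    return 0;
}
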
diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
index ff67a020b4..e86e391fa5 100644
--- a/hw/virtio/vhost-user-base.c
+++ b/hw/virtio/vhost-user-base.c
@@ -16,6 +16,7 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/vhost-user-base.h"
#include "qemu/error-report.h"
+#include "migration/blocker.h"
static void vub_start(VirtIODevice *vdev)
{
@@ -276,7 +277,8 @@ static void vub_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserBase *vub = VHOST_USER_BASE(dev);
- int ret;
+ uint64_t memory_sizes[VIRTIO_MAX_SHMEM_REGIONS];
+ int i, ret, nregions;
if (!vub->chardev.chr) {
error_setg(errp, "vhost-user-base: missing chardev");
@@ -319,7 +321,7 @@ static void vub_device_realize(DeviceState *dev, Error **errp)
/* Allocate queues */
vub->vqs = g_ptr_array_sized_new(vub->num_vqs);
- for (int i = 0; i < vub->num_vqs; i++) {
+ for (i = 0; i < vub->num_vqs; i++) {
g_ptr_array_add(vub->vqs,
virtio_add_queue(vdev, vub->vq_size,
vub_handle_output));
@@ -333,11 +335,50 @@ static void vub_device_realize(DeviceState *dev, Error **errp)
VHOST_BACKEND_TYPE_USER, 0, errp);
if (ret < 0) {
- do_vhost_user_cleanup(vdev, vub);
+ goto err;
+ }
+
+ ret = vub->vhost_dev.vhost_ops->vhost_get_shmem_config(&vub->vhost_dev,
+ &nregions,
+ memory_sizes,
+ errp);
+
+ if (ret < 0) {
+ goto err;
+ }
+
+ for (i = 0; i < nregions; i++) {
+ if (memory_sizes[i]) {
+ if (vub->vhost_dev.migration_blocker == NULL) {
+ error_setg(&vub->vhost_dev.migration_blocker,
+ "Migration disabled: devices with VIRTIO Shared Memory "
+ "Regions do not support migration yet.");
+ ret = migrate_add_blocker_normal(
+ &vub->vhost_dev.migration_blocker,
+ errp);
+
+ if (ret < 0) {
+ goto err;
+ }
+ }
+
+            if (memory_sizes[i] % qemu_real_host_page_size() != 0) {
+                error_setg(errp, "Shared memory %d size must be a multiple "
+                           "of the host page size", i);
+                goto err;
+            }
+
+            g_autofree char *shm_name = g_strdup_printf("vub-shm-%d", i);
+
+            memory_region_init(virtio_new_shmem_region(vdev, i)->mr,
+                               OBJECT(vdev), shm_name,
+                               memory_sizes[i]);
+ }
}
qemu_chr_fe_set_handlers(&vub->chardev, NULL, NULL, vub_event, NULL,
dev, NULL, true);
+ return;
+err:
+ do_vhost_user_cleanup(vdev, vub);
}
static void vub_device_unrealize(DeviceState *dev)
diff --git a/hw/virtio/vhost-user-device-pci.c b/hw/virtio/vhost-user-device-pci.c
index f10bac874e..eeb52671a0 100644
--- a/hw/virtio/vhost-user-device-pci.c
+++ b/hw/virtio/vhost-user-device-pci.c
@@ -8,14 +8,18 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/vhost-user-base.h"
#include "hw/virtio/virtio-pci.h"
+#define VIRTIO_DEVICE_PCI_SHMEM_BAR 2
+
struct VHostUserDevicePCI {
VirtIOPCIProxy parent_obj;
VHostUserBase vub;
+ MemoryRegion shmembar;
};
#define TYPE_VHOST_USER_DEVICE_PCI "vhost-user-device-pci-base"
@@ -25,10 +29,36 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostUserDevicePCI, VHOST_USER_DEVICE_PCI)
static void vhost_user_device_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
VHostUserDevicePCI *dev = VHOST_USER_DEVICE_PCI(vpci_dev);
- DeviceState *vdev = DEVICE(&dev->vub);
+ DeviceState *dev_state = DEVICE(&dev->vub);
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev_state);
+ VirtSharedMemory *shmem, *next;
+ uint64_t offset = 0, shmem_size = 0;
vpci_dev->nvectors = 1;
- qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
+    if (!qdev_realize(dev_state, BUS(&vpci_dev->bus), errp)) {
+        return;
+    }
+
+    QSIMPLEQ_FOREACH_SAFE(shmem, &vdev->shmem_list, entry, next) {
+        uint64_t size = memory_region_size(shmem->mr);
+
+        if (size > UINT64_MAX - shmem_size) {
+            error_setg(errp, "Total shared memory size overflows");
+            return;
+        }
+        shmem_size = shmem_size + size;
+ }
+ if (shmem_size) {
+ memory_region_init(&dev->shmembar, OBJECT(vpci_dev),
+ "vhost-device-pci-shmembar", shmem_size);
+        QSIMPLEQ_FOREACH_SAFE(shmem, &vdev->shmem_list, entry, next) {
+            uint64_t size = memory_region_size(shmem->mr);
+
+            memory_region_add_subregion(&dev->shmembar, offset, shmem->mr);
+            virtio_pci_add_shm_cap(vpci_dev, VIRTIO_DEVICE_PCI_SHMEM_BAR,
+                                   offset, size, 0);
+            offset += size;
+ }
+ pci_register_bar(&vpci_dev->pci_dev, VIRTIO_DEVICE_PCI_SHMEM_BAR,
+ PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_PREFETCH |
+ PCI_BASE_ADDRESS_MEM_TYPE_64,
+ &dev->shmembar);
+ }
}
static void vhost_user_device_pci_class_init(ObjectClass *klass,
--
2.49.0
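
For reference, each mapped region is advertised to the guest through a
VIRTIO_PCI_CAP_SHARED_MEMORY_CFG vendor capability added by
virtio_pci_add_shm_cap(). The sketch below reproduces the capability
layout defined by the VIRTIO specification (struct virtio_pci_cap64, all
multi-byte fields little-endian) purely to illustrate what a guest driver
parses to locate each region inside the BAR; it is not part of this patch.

#include <stdint.h>

struct virtio_pci_cap {
    uint8_t  cap_vndr;    /* PCI_CAP_ID_VNDR */
    uint8_t  cap_next;    /* link to the next PCI capability */
    uint8_t  cap_len;     /* length of this capability structure */
    uint8_t  cfg_type;    /* VIRTIO_PCI_CAP_SHARED_MEMORY_CFG (8) */
    uint8_t  bar;         /* BAR index, 2 for this device */
    uint8_t  id;          /* shmid of the region */
    uint8_t  padding[2];
    uint32_t offset;      /* low 32 bits of the offset within the BAR */
    uint32_t length;      /* low 32 bits of the region length */
};

struct virtio_pci_cap64 {
    struct virtio_pci_cap cap;
    uint32_t offset_hi;   /* high 32 bits of the offset */
    uint32_t length_hi;   /* high 32 bits of the length */
};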