* [PATCH v4 2/5] vhost-user: introduce should_drain on GET_VRING_BASE
2025-12-29 10:21 [PATCH v4 0/5] support inflight migration Alexandr Moshkov
2025-12-29 10:21 ` [PATCH v4 1/5] vhost-user.rst: specify vhost-user back-end action on GET_VRING_BASE Alexandr Moshkov
@ 2025-12-29 10:21 ` Alexandr Moshkov
2025-12-29 10:21 ` [PATCH v4 3/5] vmstate: introduce VMSTATE_VBUFFER_UINT64 Alexandr Moshkov
` (3 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Alexandr Moshkov @ 2025-12-29 10:21 UTC (permalink / raw)
To: qemu-devel
Cc: Gonglei (Arei), Zhenwei Pi, Michael S. Tsirkin,
Stefano Garzarella, Raphael Norwitz, Kevin Wolf, Hanna Reitz,
Jason Wang, Paolo Bonzini, Fam Zheng, Alex Bennée,
Stefan Hajnoczi, mzamazal, Peter Xu, Fabiano Rosas, qemu-block,
virtio-fs, yc-core@yandex-team.ru, Eric Blake, Markus Armbruster,
Alexandr Moshkov
Now on GET_VRING_BASE QEMU can control whether to wait for in-flight
requests to complete or not.
This will be helpful in the future for in-flight request migration in
vhost-user devices.
Signed-off-by: Alexandr Moshkov <dtalexundeer@yandex-team.ru>
---
backends/cryptodev-vhost.c | 2 +-
backends/vhost-user.c | 2 +-
docs/interop/vhost-user.rst | 11 +++++++----
hw/block/vhost-user-blk.c | 3 ++-
hw/net/vhost_net.c | 9 +++++----
hw/scsi/vhost-scsi-common.c | 2 +-
hw/virtio/vdpa-dev.c | 2 +-
hw/virtio/vhost-user-base.c | 2 +-
hw/virtio/vhost-user-fs.c | 2 +-
hw/virtio/vhost-user-scmi.c | 2 +-
hw/virtio/vhost-vsock-common.c | 2 +-
hw/virtio/vhost.c | 24 +++++++++++++++---------
include/hw/virtio/vhost.h | 7 +++++--
13 files changed, 42 insertions(+), 28 deletions(-)
diff --git a/backends/cryptodev-vhost.c b/backends/cryptodev-vhost.c
index b4dafb4062..aaa4e6bfcb 100644
--- a/backends/cryptodev-vhost.c
+++ b/backends/cryptodev-vhost.c
@@ -109,7 +109,7 @@ static void
cryptodev_vhost_stop_one(CryptoDevBackendVhost *crypto,
VirtIODevice *dev)
{
- vhost_dev_stop(&crypto->dev, dev, false);
+ vhost_dev_stop(&crypto->dev, dev, false, true);
vhost_dev_disable_notifiers(&crypto->dev, dev);
}
diff --git a/backends/vhost-user.c b/backends/vhost-user.c
index e65ba7b648..bb271c4d68 100644
--- a/backends/vhost-user.c
+++ b/backends/vhost-user.c
@@ -108,7 +108,7 @@ vhost_user_backend_stop(VhostUserBackend *b)
return 0;
}
- ret = vhost_dev_stop(&b->dev, b->vdev, true);
+ ret = vhost_dev_stop(&b->dev, b->vdev, true, true);
if (k->set_guest_notifiers &&
k->set_guest_notifiers(qbus->parent, b->dev.nvqs, false) < 0) {
diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst
index 02908b48fa..803d5c6b8f 100644
--- a/docs/interop/vhost-user.rst
+++ b/docs/interop/vhost-user.rst
@@ -1243,11 +1243,14 @@ Front-end message types
When and as long as all of a device's vrings are stopped, it is
*suspended*, see :ref:`Suspended device state
- <suspended_device_state>`. The back-end must complete all inflight I/O
- requests for the specified vring before stopping it.
+ <suspended_device_state>`.
- The request payload's *num* field is currently reserved and must be
- set to 0.
+ The request payload's *num* field controls inflight I/O handling:
+
+ * When *num* is set to 1, the back-end must complete all inflight I/O
+ requests for the specified vring before stopping it.
+ * When *num* is set to 0, the back-end may stop the vring immediately
+ without waiting for inflight I/O requests to complete.
``VHOST_USER_SET_VRING_KICK``
:id: 12
diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index a8fd90480a..56d55c18c8 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -227,6 +227,7 @@ static int vhost_user_blk_stop(VirtIODevice *vdev)
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
int ret;
bool force_stop = false;
+ bool should_drain = true;
trace_vhost_user_blk_stop_in(vdev);
@@ -247,7 +248,7 @@ static int vhost_user_blk_stop(VirtIODevice *vdev)
migrate_local_vhost_user_blk());
ret = force_stop ? vhost_dev_force_stop(&s->dev, vdev, true) :
- vhost_dev_stop(&s->dev, vdev, true);
+ vhost_dev_stop(&s->dev, vdev, true, should_drain);
if (k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false) < 0) {
error_report("vhost guest notifier cleanup failed: %d", ret);
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index c4526974fb..4be966432e 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -384,7 +384,7 @@ fail:
if (net->nc->info->poll) {
net->nc->info->poll(net->nc, true);
}
- vhost_dev_stop(&net->dev, dev, false);
+ vhost_dev_stop(&net->dev, dev, false, true);
fail_start:
return r;
}
@@ -403,7 +403,7 @@ static void vhost_net_stop_one(struct vhost_net *net,
if (net->nc->info->poll) {
net->nc->info->poll(net->nc, true);
}
- vhost_dev_stop(&net->dev, dev, false);
+ vhost_dev_stop(&net->dev, dev, false, true);
if (net->nc->info->stop) {
net->nc->info->stop(net->nc);
}
@@ -636,7 +636,8 @@ void vhost_net_virtqueue_reset(VirtIODevice *vdev, NetClientState *nc,
vhost_virtqueue_stop(&net->dev,
vdev,
net->dev.vqs + idx,
- net->dev.vq_index + idx);
+ net->dev.vq_index + idx,
+ true);
}
int vhost_net_virtqueue_restart(VirtIODevice *vdev, NetClientState *nc,
@@ -686,7 +687,7 @@ err_start:
assert(ret >= 0);
}
- vhost_dev_stop(&net->dev, vdev, false);
+ vhost_dev_stop(&net->dev, vdev, false, true);
return r;
}
diff --git a/hw/scsi/vhost-scsi-common.c b/hw/scsi/vhost-scsi-common.c
index 43525ba46d..57b40301ed 100644
--- a/hw/scsi/vhost-scsi-common.c
+++ b/hw/scsi/vhost-scsi-common.c
@@ -108,7 +108,7 @@ int vhost_scsi_common_stop(VHostSCSICommon *vsc)
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
int ret = 0;
- ret = vhost_dev_stop(&vsc->dev, vdev, true);
+ ret = vhost_dev_stop(&vsc->dev, vdev, true, true);
if (k->set_guest_notifiers) {
int r = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false);
diff --git a/hw/virtio/vdpa-dev.c b/hw/virtio/vdpa-dev.c
index b6b4ee7d38..45bb8ca59b 100644
--- a/hw/virtio/vdpa-dev.c
+++ b/hw/virtio/vdpa-dev.c
@@ -301,7 +301,7 @@ static void vhost_vdpa_device_stop(VirtIODevice *vdev)
return;
}
- vhost_dev_stop(&s->dev, vdev, false);
+ vhost_dev_stop(&s->dev, vdev, false, true);
ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
if (ret < 0) {
diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
index 0768231a88..8277d75ce9 100644
--- a/hw/virtio/vhost-user-base.c
+++ b/hw/virtio/vhost-user-base.c
@@ -77,7 +77,7 @@ static int vub_stop(VirtIODevice *vdev)
return 0;
}
- ret = vhost_dev_stop(&vub->vhost_dev, vdev, true);
+ ret = vhost_dev_stop(&vub->vhost_dev, vdev, true, true);
if (k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false) < 0) {
error_report("vhost guest notifier cleanup failed: %d", ret);
diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
index 2a8eead90b..2b4b52de52 100644
--- a/hw/virtio/vhost-user-fs.c
+++ b/hw/virtio/vhost-user-fs.c
@@ -111,7 +111,7 @@ static int vuf_stop(VirtIODevice *vdev)
return 0;
}
- ret = vhost_dev_stop(&fs->vhost_dev, vdev, true);
+ ret = vhost_dev_stop(&fs->vhost_dev, vdev, true, true);
if (k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false) < 0) {
error_report("vhost guest notifier cleanup failed: %d", ret);
diff --git a/hw/virtio/vhost-user-scmi.c b/hw/virtio/vhost-user-scmi.c
index 40e567c18a..580ffa0e2e 100644
--- a/hw/virtio/vhost-user-scmi.c
+++ b/hw/virtio/vhost-user-scmi.c
@@ -101,7 +101,7 @@ static int vu_scmi_stop(VirtIODevice *vdev)
return 0;
}
- ret = vhost_dev_stop(vhost_dev, vdev, true);
+ ret = vhost_dev_stop(vhost_dev, vdev, true, true);
if (k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false) < 0) {
error_report("vhost guest notifier cleanup failed: %d", ret);
diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
index c6c44d8989..a2c52c8914 100644
--- a/hw/virtio/vhost-vsock-common.c
+++ b/hw/virtio/vhost-vsock-common.c
@@ -106,7 +106,7 @@ int vhost_vsock_common_stop(VirtIODevice *vdev)
return 0;
}
- ret = vhost_dev_stop(&vvc->vhost_dev, vdev, true);
+ ret = vhost_dev_stop(&vvc->vhost_dev, vdev, true, true);
if (k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, false) < 0) {
error_report("vhost guest notifier cleanup failed: %d", ret);
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index c46203eb9c..cb2e21bd75 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1426,11 +1426,13 @@ fail:
static int do_vhost_virtqueue_stop(struct vhost_dev *dev,
struct VirtIODevice *vdev,
struct vhost_virtqueue *vq,
- unsigned idx, bool force)
+ unsigned idx, bool force,
+ bool should_drain)
{
int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
struct vhost_vring_state state = {
.index = vhost_vq_index,
+ .num = should_drain,
};
int r = 0;
@@ -1481,9 +1483,10 @@ static int do_vhost_virtqueue_stop(struct vhost_dev *dev,
int vhost_virtqueue_stop(struct vhost_dev *dev,
struct VirtIODevice *vdev,
struct vhost_virtqueue *vq,
- unsigned idx)
+ unsigned idx,
+ bool should_drain)
{
- return do_vhost_virtqueue_stop(dev, vdev, vq, idx, false);
+ return do_vhost_virtqueue_stop(dev, vdev, vq, idx, false, should_drain);
}
static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
@@ -2310,7 +2313,8 @@ fail_vq:
vhost_virtqueue_stop(hdev,
vdev,
hdev->vqs + i,
- hdev->vq_index + i);
+ hdev->vq_index + i,
+ true);
}
fail_mem:
@@ -2325,7 +2329,7 @@ fail_features:
/* Host notifiers must be enabled at this point. */
static int do_vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev,
- bool vrings, bool force)
+ bool vrings, bool force, bool should_drain)
{
int i;
int rc = 0;
@@ -2361,7 +2365,8 @@ static int do_vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev,
vdev,
hdev->vqs + i,
hdev->vq_index + i,
- force);
+ force,
+ should_drain);
}
if (hdev->vhost_ops->vhost_reset_status) {
hdev->vhost_ops->vhost_reset_status(hdev);
@@ -2383,15 +2388,16 @@ static int do_vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev,
return rc;
}
-int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
+int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings,
+ bool should_drain)
{
- return do_vhost_dev_stop(hdev, vdev, vrings, false);
+ return do_vhost_dev_stop(hdev, vdev, vrings, false, should_drain);
}
int vhost_dev_force_stop(struct vhost_dev *hdev, VirtIODevice *vdev,
bool vrings)
{
- return do_vhost_dev_stop(hdev, vdev, vrings, true);
+ return do_vhost_dev_stop(hdev, vdev, vrings, true, false);
}
int vhost_net_set_backend(struct vhost_dev *hdev,
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 13ca2c319f..94fb9a6654 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -235,6 +235,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
* @hdev: common vhost_dev structure
* @vdev: the VirtIODevice structure
* @vrings: true to have vrings disabled in this call
+ * @should_drain: true to tell the back-end to drain in-flight requests
*
* Stop the vhost device. After the device is stopped the notifiers
* can be disabled (@vhost_dev_disable_notifiers) and the device can
@@ -242,7 +243,8 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
*
* Return: 0 on success, != 0 on error when stopping dev.
*/
-int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
+int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev,
+ bool vrings, bool should_drain);
/**
* vhost_dev_force_stop() - force stop the vhost device
@@ -400,7 +402,8 @@ int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);
int vhost_virtqueue_start(struct vhost_dev *dev, struct VirtIODevice *vdev,
struct vhost_virtqueue *vq, unsigned idx);
int vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev,
- struct vhost_virtqueue *vq, unsigned idx);
+ struct vhost_virtqueue *vq, unsigned idx,
+ bool should_drain);
void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
void vhost_dev_free_inflight(struct vhost_inflight *inflight);
--
2.34.1
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH v4 4/5] vhost: add vmstate for inflight region with inner buffer
2025-12-29 10:21 [PATCH v4 0/5] support inflight migration Alexandr Moshkov
` (2 preceding siblings ...)
2025-12-29 10:21 ` [PATCH v4 3/5] vmstate: introduce VMSTATE_VBUFFER_UINT64 Alexandr Moshkov
@ 2025-12-29 10:21 ` Alexandr Moshkov
2025-12-29 10:21 ` [PATCH v4 5/5] vhost-user-blk: support inter-host inflight migration Alexandr Moshkov
2026-01-06 19:04 ` [PATCH v4 0/5] support " Stefan Hajnoczi
5 siblings, 0 replies; 7+ messages in thread
From: Alexandr Moshkov @ 2025-12-29 10:21 UTC (permalink / raw)
To: qemu-devel
Cc: Gonglei (Arei), Zhenwei Pi, Michael S. Tsirkin,
Stefano Garzarella, Raphael Norwitz, Kevin Wolf, Hanna Reitz,
Jason Wang, Paolo Bonzini, Fam Zheng, Alex Bennée,
Stefan Hajnoczi, mzamazal, Peter Xu, Fabiano Rosas, qemu-block,
virtio-fs, yc-core@yandex-team.ru, Eric Blake, Markus Armbruster,
Alexandr Moshkov
Prepare for future inflight region migration for vhost-user-blk.
We need to migrate the size, queue_size, and inner buffer.
So first it migrates the size and queue_size fields, then allocates
memory for the buffer with the migrated size, and finally migrates the
inner buffer itself.
Signed-off-by: Alexandr Moshkov <dtalexundeer@yandex-team.ru>
---
hw/virtio/vhost.c | 42 +++++++++++++++++++++++++++++++++++++++
include/hw/virtio/vhost.h | 6 ++++++
2 files changed, 48 insertions(+)
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index cb2e21bd75..368e1d33ce 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -2031,6 +2031,48 @@ const VMStateDescription vmstate_backend_transfer_vhost_inflight = {
}
};
+static int vhost_inflight_buffer_pre_load(void *opaque, Error **errp)
+{
+ info_report("vhost_inflight_region_buffer_pre_load");
+ struct vhost_inflight *inflight = opaque;
+
+ int fd = -1;
+ void *addr = qemu_memfd_alloc("vhost-inflight", inflight->size,
+ F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
+ &fd, errp);
+ if (*errp) {
+ return -ENOMEM;
+ }
+
+ inflight->offset = 0;
+ inflight->addr = addr;
+ inflight->fd = fd;
+
+ return 0;
+}
+
+const VMStateDescription vmstate_vhost_inflight_region_buffer = {
+ .name = "vhost-inflight-region/buffer",
+ .pre_load_errp = vhost_inflight_buffer_pre_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_VBUFFER_UINT64(addr, struct vhost_inflight, 0, NULL, size),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_vhost_inflight_region = {
+ .name = "vhost-inflight-region",
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT64(size, struct vhost_inflight),
+ VMSTATE_UINT16(queue_size, struct vhost_inflight),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * const []) {
+ &vmstate_vhost_inflight_region_buffer,
+ NULL
+ }
+};
+
const VMStateDescription vmstate_vhost_virtqueue = {
.name = "vhost-virtqueue",
.fields = (const VMStateField[]) {
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 94fb9a6654..453e4a745b 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -599,6 +599,12 @@ extern const VMStateDescription vmstate_backend_transfer_vhost_inflight;
vmstate_backend_transfer_vhost_inflight, \
struct vhost_inflight)
+extern const VMStateDescription vmstate_vhost_inflight_region;
+#define VMSTATE_VHOST_INFLIGHT_REGION(_field, _state) \
+ VMSTATE_STRUCT_POINTER(_field, _state, \
+ vmstate_vhost_inflight_region, \
+ struct vhost_inflight)
+
extern const VMStateDescription vmstate_vhost_dev;
#define VMSTATE_BACKEND_TRANSFER_VHOST(_field, _state) \
VMSTATE_STRUCT(_field, _state, 0, vmstate_vhost_dev, struct vhost_dev)
--
2.34.1
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH v4 5/5] vhost-user-blk: support inter-host inflight migration
2025-12-29 10:21 [PATCH v4 0/5] support inflight migration Alexandr Moshkov
` (3 preceding siblings ...)
2025-12-29 10:21 ` [PATCH v4 4/5] vhost: add vmstate for inflight region with inner buffer Alexandr Moshkov
@ 2025-12-29 10:21 ` Alexandr Moshkov
2026-01-06 19:04 ` [PATCH v4 0/5] support " Stefan Hajnoczi
5 siblings, 0 replies; 7+ messages in thread
From: Alexandr Moshkov @ 2025-12-29 10:21 UTC (permalink / raw)
To: qemu-devel
Cc: Gonglei (Arei), Zhenwei Pi, Michael S. Tsirkin,
Stefano Garzarella, Raphael Norwitz, Kevin Wolf, Hanna Reitz,
Jason Wang, Paolo Bonzini, Fam Zheng, Alex Bennée,
Stefan Hajnoczi, mzamazal, Peter Xu, Fabiano Rosas, qemu-block,
virtio-fs, yc-core@yandex-team.ru, Eric Blake, Markus Armbruster,
Alexandr Moshkov
During inter-host migration, waiting for disk requests to be drained
in the vhost-user backend can incur significant downtime.
This can be avoided if QEMU migrates the inflight region in
vhost-user-blk.
Thus, during the QEMU migration, the vhost-user backend can cancel all
inflight requests and then, after migration, they will be executed on
another host.
In vhost_user_blk_stop(), when inflight migration is enabled, set
should_drain to false, so after the GET_VRING_BASE message all
in-flight requests will be migrated to the other VM.
Signed-off-by: Alexandr Moshkov <dtalexundeer@yandex-team.ru>
---
hw/block/vhost-user-blk.c | 25 +++++++++++++++++++++++++
include/hw/virtio/vhost-user-blk.h | 1 +
2 files changed, 26 insertions(+)
diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index 56d55c18c8..c537e7ef9b 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -242,6 +242,7 @@ static int vhost_user_blk_stop(VirtIODevice *vdev)
force_stop = s->skip_get_vring_base_on_force_shutdown &&
qemu_force_shutdown_requested();
+ should_drain = !s->inflight_migration;
s->dev.backend_transfer = s->dev.backend_transfer ||
(runstate_check(RUN_STATE_FINISH_MIGRATE) &&
@@ -657,6 +658,24 @@ static struct vhost_dev *vhost_user_blk_get_vhost(VirtIODevice *vdev)
return &s->dev;
}
+static bool vhost_user_blk_inflight_needed(void *opaque)
+{
+ struct VHostUserBlk *s = opaque;
+
+ return s->inflight_migration &&
+ !migrate_local_vhost_user_blk();
+}
+
+static const VMStateDescription vmstate_vhost_user_blk_inflight = {
+ .name = "vhost-user-blk/inflight",
+ .version_id = 1,
+ .needed = vhost_user_blk_inflight_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_VHOST_INFLIGHT_REGION(inflight, VHostUserBlk),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static bool vhost_user_blk_pre_incoming(void *opaque, Error **errp)
{
VHostUserBlk *s = VHOST_USER_BLK(opaque);
@@ -679,6 +698,10 @@ static const VMStateDescription vmstate_vhost_user_blk = {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
+ .subsections = (const VMStateDescription * const []) {
+ &vmstate_vhost_user_blk_inflight,
+ NULL
+ }
};
static bool vhost_user_needed(void *opaque)
@@ -752,6 +775,8 @@ static const Property vhost_user_blk_properties[] = {
VIRTIO_BLK_F_WRITE_ZEROES, true),
DEFINE_PROP_BOOL("skip-get-vring-base-on-force-shutdown", VHostUserBlk,
skip_get_vring_base_on_force_shutdown, false),
+ DEFINE_PROP_BOOL("inflight-migration", VHostUserBlk,
+ inflight_migration, false),
};
static void vhost_user_blk_class_init(ObjectClass *klass, const void *data)
diff --git a/include/hw/virtio/vhost-user-blk.h b/include/hw/virtio/vhost-user-blk.h
index b06f55fd6f..e1466e5cf6 100644
--- a/include/hw/virtio/vhost-user-blk.h
+++ b/include/hw/virtio/vhost-user-blk.h
@@ -52,6 +52,7 @@ struct VHostUserBlk {
bool started_vu;
bool skip_get_vring_base_on_force_shutdown;
+ bool inflight_migration;
bool incoming_backend;
};
--
2.34.1
^ permalink raw reply related [flat|nested] 7+ messages in thread