From: Kirti Wankhede <kwankhede@nvidia.com>
To: <alex.williamson@redhat.com>, <cjia@nvidia.com>
Cc: Zhengxiao.zx@Alibaba-inc.com, kevin.tian@intel.com,
yi.l.liu@intel.com, yan.y.zhao@intel.com, eskultet@redhat.com,
ziye.yang@intel.com, qemu-devel@nongnu.org, cohuck@redhat.com,
shuangtai.tst@alibaba-inc.com, dgilbert@redhat.com,
zhi.a.wang@intel.com, mlevitsk@redhat.com, pasic@linux.ibm.com,
aik@ozlabs.ru, Kirti Wankhede <kwankhede@nvidia.com>,
eauger@redhat.com, felipe@nutanix.com,
jonathan.davies@nutanix.com, changpeng.liu@intel.com,
Ken.Xue@amd.com
Subject: [Qemu-devel] [PATCH v6 06/13] vfio: Add VM state change handler to know state of VM
Date: Tue, 9 Jul 2019 07:29:07 +0530
Message-ID: <1562637554-22439-7-git-send-email-kwankhede@nvidia.com>
In-Reply-To: <1562637554-22439-1-git-send-email-kwankhede@nvidia.com>

The VM state change handler is called whenever the VM's run state changes.
Use it to set the VFIO device state to _RUNNING when the VM starts running,
and to clear _RUNNING when it stops.

The VM state change handler, the migration state change handler and the
log_sync listener are called asynchronously, which can lead to data
corruption in the migration region. Initialize a mutex that is used to
serialize operations on the migration data region while saving device state.
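
Below is a minimal sketch, not part of this patch, of how that lock is meant
to be used: the save/log_sync paths added later in this series are expected
to hold migration->lock around their multi-step accesses to the migration
region so they cannot interleave with the asynchronous handlers. The helper
name is hypothetical, and it assumes the vfio_device_migration_info layout
proposed earlier in this series provides a pending_bytes field.

    /*
     * Illustrative only; assumes the includes already present in
     * hw/vfio/migration.c and the pending_bytes register proposed in the
     * migration KABI patch of this series.
     */
    static uint64_t vfio_migration_pending_bytes(VFIODevice *vbasedev)
    {
        VFIOMigration *migration = vbasedev->migration;
        VFIORegion *region = &migration->region.buffer;
        uint64_t pending = 0;
        ssize_t ret;

        /* Serialize against other asynchronous users of the region */
        qemu_mutex_lock(&migration->lock);

        ret = pread(vbasedev->fd, &pending, sizeof(pending),
                    region->fd_offset +
                    offsetof(struct vfio_device_migration_info,
                             pending_bytes));
        if (ret != sizeof(pending)) {
            pending = 0;
        }

        qemu_mutex_unlock(&migration->lock);
        return pending;
    }
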
Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
Reviewed-by: Neo Jia <cjia@nvidia.com>
---
 hw/vfio/migration.c           | 64 +++++++++++++++++++++++++++++++++++++++++++
 hw/vfio/trace-events          |  2 ++
 include/hw/vfio/vfio-common.h |  4 +++
 3 files changed, 70 insertions(+)
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index a2cfbd5af2e1..c01f08b659d0 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -78,6 +78,60 @@ err:
     return ret;
 }
 
+static int vfio_migration_set_state(VFIODevice *vbasedev, uint32_t state)
+{
+    VFIOMigration *migration = vbasedev->migration;
+    VFIORegion *region = &migration->region.buffer;
+    uint32_t device_state;
+    int ret = 0;
+
+    device_state = (state & VFIO_DEVICE_STATE_MASK) |
+                   (vbasedev->device_state & ~VFIO_DEVICE_STATE_MASK);
+
+    if ((device_state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_INVALID) {
+        return -EINVAL;
+    }
+
+    ret = pwrite(vbasedev->fd, &device_state, sizeof(device_state),
+                 region->fd_offset + offsetof(struct vfio_device_migration_info,
+                                              device_state));
+    if (ret < 0) {
+        error_report("%s: Failed to set device state %d %s",
+                     vbasedev->name, ret, strerror(errno));
+        return ret;
+    }
+
+    vbasedev->device_state = device_state;
+    trace_vfio_migration_set_state(vbasedev->name, device_state);
+    return 0;
+}
+
+static void vfio_vmstate_change(void *opaque, int running, RunState state)
+{
+    VFIODevice *vbasedev = opaque;
+
+    if (vbasedev->vm_running != running) {
+        int ret;
+        uint32_t dev_state;
+
+        if (running) {
+            dev_state = VFIO_DEVICE_STATE_RUNNING;
+        } else {
+            dev_state = (vbasedev->device_state & VFIO_DEVICE_STATE_MASK) &
+                        ~VFIO_DEVICE_STATE_RUNNING;
+        }
+
+        ret = vfio_migration_set_state(vbasedev, dev_state);
+        if (ret) {
+            error_report("%s: Failed to set device state 0x%x",
+                         vbasedev->name, dev_state);
+        }
+        vbasedev->vm_running = running;
+        trace_vfio_vmstate_change(vbasedev->name, running, RunState_str(state),
+                                  dev_state);
+    }
+}
+
 static int vfio_migration_init(VFIODevice *vbasedev,
                                struct vfio_region_info *info)
 {
@@ -93,6 +147,11 @@ static int vfio_migration_init(VFIODevice *vbasedev,
         return ret;
     }
 
+    qemu_mutex_init(&vbasedev->migration->lock);
+
+    vbasedev->vm_state = qemu_add_vm_change_state_handler(vfio_vmstate_change,
+                                                          vbasedev);
+
     return 0;
 }
 
@@ -135,11 +194,16 @@ void vfio_migration_finalize(VFIODevice *vbasedev)
         return;
     }
 
+    if (vbasedev->vm_state) {
+        qemu_del_vm_change_state_handler(vbasedev->vm_state);
+    }
+
     if (vbasedev->migration_blocker) {
         migrate_del_blocker(vbasedev->migration_blocker);
         error_free(vbasedev->migration_blocker);
     }
 
+    qemu_mutex_destroy(&vbasedev->migration->lock);
     vfio_migration_region_exit(vbasedev);
     g_free(vbasedev->migration);
 }
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 191a726a1312..3d15bacd031a 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -146,3 +146,5 @@ vfio_display_edid_write_error(void) ""
 
 # migration.c
 vfio_migration_probe(char *name, uint32_t index) " (%s) Region %d"
+vfio_migration_set_state(char *name, uint32_t state) " (%s) state %d"
+vfio_vmstate_change(char *name, int running, const char *reason, uint32_t dev_state) " (%s) running %d reason %s device state %d"
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 152da3f8d6f3..f6c70db3a9c1 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -29,6 +29,7 @@
 #ifdef CONFIG_LINUX
 #include <linux/vfio.h>
 #endif
+#include "sysemu/sysemu.h"
 
 #define VFIO_MSG_PREFIX "vfio %s: "
 
@@ -124,6 +125,9 @@ typedef struct VFIODevice {
     unsigned int flags;
     VFIOMigration *migration;
     Error *migration_blocker;
+    uint32_t device_state;
+    VMChangeStateEntry *vm_state;
+    int vm_running;
 } VFIODevice;
 
 struct VFIODeviceOps {
--
2.7.0