From: Laurent Vivier <lvivier@redhat.com>
To: "Dr . David Alan Gilbert" <dgilbert@redhat.com>
Cc: "Michael S . Tsirkin" <mst@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Amit Shah <amit@kernel.org>,
qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH v2 2/2] virtio-rng: stop virtqueue while the CPU is stopped
Date: Wed, 12 Apr 2017 15:53:12 +0200
Message-ID: <20170412135312.1686-3-lvivier@redhat.com>
In-Reply-To: <20170412135312.1686-1-lvivier@redhat.com>

If we modify the virtio-rng virtqueue after the
vmstate has already been migrated, the virtqueue
state can become inconsistent with the memory
content.

To avoid this, stop the virtqueue while the CPU
is stopped.

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
hw/virtio/trace-events | 3 +++
hw/virtio/virtio-rng.c | 29 +++++++++++++++++++++++------
include/hw/virtio/virtio-rng.h | 2 ++
3 files changed, 28 insertions(+), 6 deletions(-)
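
A note for reviewers: the fix relies on QEMU's VM change-state
callbacks. Below is a minimal, self-contained sketch of the pattern as
this patch uses it. "MyDevice", "my_device_process()" and
"my_device_feed()" are hypothetical placeholders standing in for
VirtIORNG, virtio_rng_process() and chr_read(); header paths reflect
the tree as of this posting.

    #include "qemu/osdep.h"
    #include "sysemu/sysemu.h"  /* runstate_check(), change-state handler API */

    typedef struct MyDevice {
        VMChangeStateEntry *vmstate;   /* handle used to unregister later */
    } MyDevice;

    /* Stand-in for the work that actually touches the virtqueue. */
    static void my_device_process(MyDevice *d)
    {
    }

    /* Data-source callback: drop input while the CPU is stopped so the
     * virtqueue is never modified after its state has been migrated. */
    static void my_device_feed(MyDevice *d, const void *buf, size_t size)
    {
        (void)buf; (void)size;
        if (!runstate_check(RUN_STATE_RUNNING)) {
            return; /* dropped; retried once the CPU restarts */
        }
        my_device_process(d);
    }

    /* Called on every run-state transition (stop, resume, ...). */
    static void my_device_vm_state_change(void *opaque, int running,
                                          RunState state)
    {
        MyDevice *d = opaque;

        if (running) {
            my_device_process(d); /* retry what was dropped while stopped */
        }
    }

    static void my_device_realize(MyDevice *d)
    {
        d->vmstate = qemu_add_vm_change_state_handler(
                         my_device_vm_state_change, d);
    }

    static void my_device_unrealize(MyDevice *d)
    {
        qemu_del_vm_change_state_handler(d->vmstate);
    }
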
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 6926eed..1f7a7c1 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -11,8 +11,11 @@ virtio_set_status(void *vdev, uint8_t val) "vdev %p val %u"
# hw/virtio/virtio-rng.c
virtio_rng_guest_not_ready(void *rng) "rng %p: guest not ready"
+virtio_rng_cpu_is_stopped(void *rng, int size) "rng %p: cpu is stopped, dropping %d bytes"
+virtio_rng_popped(void *rng) "rng %p: elem popped"
virtio_rng_pushed(void *rng, size_t len) "rng %p: %zd bytes pushed"
virtio_rng_request(void *rng, size_t size, unsigned quota) "rng %p: %zd bytes requested, %u bytes quota left"
+virtio_rng_vm_state_change(void *rng, int running, int state) "rng %p: state change to running %d state %d"
# hw/virtio/virtio-balloon.c
#
diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c
index 9639f4e..a6ee501 100644
--- a/hw/virtio/virtio-rng.c
+++ b/hw/virtio/virtio-rng.c
@@ -53,6 +53,15 @@ static void chr_read(void *opaque, const void *buf, size_t size)
return;
}
+ /* We can't modify the virtqueue until
+ * our state is fully synced.
+ */
+
+ if (!runstate_check(RUN_STATE_RUNNING)) {
+ trace_virtio_rng_cpu_is_stopped(vrng, size);
+ return;
+ }
+
vrng->quota_remaining -= size;
offset = 0;
@@ -61,6 +70,7 @@ static void chr_read(void *opaque, const void *buf, size_t size)
if (!elem) {
break;
}
+ trace_virtio_rng_popped(vrng);
len = iov_from_buf(elem->in_sg, elem->in_num,
0, buf + offset, size - offset);
offset += len;
@@ -120,17 +130,21 @@ static uint64_t get_features(VirtIODevice *vdev, uint64_t f, Error **errp)
return f;
}
-static int virtio_rng_post_load(void *opaque, int version_id)
+static void virtio_rng_vm_state_change(void *opaque, int running,
+ RunState state)
{
VirtIORNG *vrng = opaque;
+ trace_virtio_rng_vm_state_change(vrng, running, state);
+
/* We may have an element ready but couldn't process it due to a quota
- * limit. Make sure to try again after live migration when the quota may
- * have been reset.
+ * limit or because the CPU was stopped. Make sure to try again when
+ * the CPU restarts.
*/
- virtio_rng_process(vrng);
- return 0;
+ if (running && is_guest_ready(vrng)) {
+ virtio_rng_process(vrng);
+ }
}
static void check_rate_limit(void *opaque)
@@ -198,6 +212,9 @@ static void virtio_rng_device_realize(DeviceState *dev, Error **errp)
vrng->rate_limit_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
check_rate_limit, vrng);
vrng->activate_timer = true;
+
+ vrng->vmstate = qemu_add_vm_change_state_handler(virtio_rng_vm_state_change,
+ vrng);
}
static void virtio_rng_device_unrealize(DeviceState *dev, Error **errp)
@@ -205,6 +222,7 @@ static void virtio_rng_device_unrealize(DeviceState *dev, Error **errp)
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIORNG *vrng = VIRTIO_RNG(dev);
+ qemu_del_vm_change_state_handler(vrng->vmstate);
timer_del(vrng->rate_limit_timer);
timer_free(vrng->rate_limit_timer);
virtio_cleanup(vdev);
@@ -218,7 +236,6 @@ static const VMStateDescription vmstate_virtio_rng = {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
- .post_load = virtio_rng_post_load,
};
static Property virtio_rng_properties[] = {
diff --git a/include/hw/virtio/virtio-rng.h b/include/hw/virtio/virtio-rng.h
index 2d40abd..922dce7 100644
--- a/include/hw/virtio/virtio-rng.h
+++ b/include/hw/virtio/virtio-rng.h
@@ -45,6 +45,8 @@ typedef struct VirtIORNG {
QEMUTimer *rate_limit_timer;
int64_t quota_remaining;
bool activate_timer;
+
+ VMChangeStateEntry *vmstate;
} VirtIORNG;
#endif
--
2.9.3