* [Qemu-devel] [PATCH 1/6] virtio-bus: common ioeventfd infrastructure
From: Cornelia Huck @ 2016-03-24 16:15 UTC (permalink / raw)
To: qemu-devel
Cc: famz, borntraeger, mst, tubo, stefanha, Cornelia Huck, pbonzini
Introduce a set of ioeventfd callbacks on the virtio-bus level
that can be implemented by the individual transports. At the
virtio-bus level, do common handling for host notifiers (which
is actually most of it).
Two things of note:
- When setting the host notifier, we only switch from/to the
generic ioeventfd handler. This fixes a latent bug where we
had no ioeventfd assigned for a certain window.
- We always iterate over all possible virtio queues, even though
ccw (currently) has a lower limit. It does not really matter
here.
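For reference, a converted transport ends up providing something along
these lines (a condensed sketch modeled on the ccw/pci conversions later
in this series; FooProxy, FOO_DEVICE, FOO_FLAG_USE_IOEVENTFD and
foo_kick_setup() are placeholder names, not part of this patch):

static bool foo_ioeventfd_started(DeviceState *d)
{
    FooProxy *proxy = FOO_DEVICE(d);

    return proxy->ioeventfd_started;
}

static void foo_ioeventfd_set_started(DeviceState *d, bool started, bool err)
{
    FooProxy *proxy = FOO_DEVICE(d);

    proxy->ioeventfd_started = started;
    if (err) {
        /* the bus level failed to start ioeventfd: stop trying for this device */
        proxy->flags &= ~FOO_FLAG_USE_IOEVENTFD;
    }
}

static bool foo_ioeventfd_disabled(DeviceState *d)
{
    FooProxy *proxy = FOO_DEVICE(d);

    return proxy->ioeventfd_disabled ||
           !(proxy->flags & FOO_FLAG_USE_IOEVENTFD);
}

static void foo_ioeventfd_set_disabled(DeviceState *d, bool disabled)
{
    FooProxy *proxy = FOO_DEVICE(d);

    proxy->ioeventfd_disabled = disabled;
}

static int foo_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                int n, bool assign)
{
    /* wire the notifier up to the transport's kick mechanism */
    return foo_kick_setup(FOO_DEVICE(d), notifier, n, assign);
}

static void foo_bus_class_init(ObjectClass *klass, void *data)
{
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->ioeventfd_started = foo_ioeventfd_started;
    k->ioeventfd_set_started = foo_ioeventfd_set_started;
    k->ioeventfd_disabled = foo_ioeventfd_disabled;
    k->ioeventfd_set_disabled = foo_ioeventfd_set_disabled;
    k->ioeventfd_assign = foo_ioeventfd_assign;
}

The transport's own start/stop helpers then shrink to plain calls to
virtio_bus_start_ioeventfd()/virtio_bus_stop_ioeventfd(), with the
per-queue notifier setup handled once at the bus level.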
Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
---
hw/virtio/virtio-bus.c | 132 +++++++++++++++++++++++++++++++++++++++++
include/hw/virtio/virtio-bus.h | 30 ++++++++++
2 files changed, 162 insertions(+)
diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c
index 574f0e2..1313760 100644
--- a/hw/virtio/virtio-bus.c
+++ b/hw/virtio/virtio-bus.c
@@ -146,6 +146,138 @@ void virtio_bus_set_vdev_config(VirtioBusState *bus, uint8_t *config)
}
}
+/*
+ * This function handles both assigning the ioeventfd handler and
+ * registering it with the kernel.
+ * assign: register/deregister ioeventfd with the kernel
+ * set_handler: use the generic ioeventfd handler
+ */
+static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus,
+ int n, bool assign, bool set_handler)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+ int r = 0;
+
+ if (assign) {
+ r = event_notifier_init(notifier, 1);
+ if (r < 0) {
+ error_report("%s: unable to init event notifier: %d", __func__, r);
+ return r;
+ }
+ virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
+ r = k->ioeventfd_assign(proxy, notifier, n, assign);
+ if (r < 0) {
+ error_report("%s: unable to assign ioeventfd: %d", __func__, r);
+ virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+ event_notifier_cleanup(notifier);
+ return r;
+ }
+ } else {
+ virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+ k->ioeventfd_assign(proxy, notifier, n, assign);
+ event_notifier_cleanup(notifier);
+ }
+ return r;
+}
+
+void virtio_bus_start_ioeventfd(VirtioBusState *bus)
+{
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+ DeviceState *proxy = DEVICE(BUS(bus)->parent);
+ VirtIODevice *vdev;
+ int n, r;
+
+ if (!k->ioeventfd_started || k->ioeventfd_started(proxy)) {
+ return;
+ }
+ if (k->ioeventfd_disabled(proxy)) {
+ return;
+ }
+ vdev = virtio_bus_get_device(bus);
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ r = set_host_notifier_internal(proxy, bus, n, true, true);
+ if (r < 0) {
+ goto assign_error;
+ }
+ }
+ k->ioeventfd_set_started(proxy, true, false);
+ return;
+
+assign_error:
+ while (--n >= 0) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+
+ r = set_host_notifier_internal(proxy, bus, n, false, false);
+ assert(r >= 0);
+ }
+ k->ioeventfd_set_started(proxy, false, true);
+ error_report("%s: failed. Fallback to userspace (slower).", __func__);
+}
+
+void virtio_bus_stop_ioeventfd(VirtioBusState *bus)
+{
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+ DeviceState *proxy = DEVICE(BUS(bus)->parent);
+ VirtIODevice *vdev;
+ int n, r;
+
+ if (!k->ioeventfd_started || !k->ioeventfd_started(proxy)) {
+ return;
+ }
+ vdev = virtio_bus_get_device(bus);
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ r = set_host_notifier_internal(proxy, bus, n, false, false);
+ assert(r >= 0);
+ }
+ k->ioeventfd_set_started(proxy, false, false);
+}
+
+/*
+ * This function switches from/to the generic ioeventfd handler.
+ * assign==false means 'use generic ioeventfd handler'.
+ */
+int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign)
+{
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+ DeviceState *proxy = DEVICE(BUS(bus)->parent);
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+
+ if (!k->ioeventfd_started) {
+ return -ENOSYS;
+ }
+ if (assign) {
+ /*
+ * Stop using the generic ioeventfd, we are doing eventfd handling
+ * ourselves below
+ */
+ k->ioeventfd_set_disabled(proxy, true);
+ }
+ /*
+ * Just switch the handler, don't deassign the ioeventfd.
+ * Otherwise, there's a window where we don't have an
+ * ioeventfd and we may end up with a notification where
+ * we don't expect one.
+ */
+ virtio_queue_set_host_notifier_fd_handler(vq, assign, !assign);
+ if (!assign) {
+ /* Use generic ioeventfd handler again. */
+ k->ioeventfd_set_disabled(proxy, false);
+ }
+ return 0;
+}
+
static char *virtio_bus_get_dev_path(DeviceState *dev)
{
BusState *bus = qdev_get_parent_bus(dev);
diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h
index 3f2c136..9637f80 100644
--- a/include/hw/virtio/virtio-bus.h
+++ b/include/hw/virtio/virtio-bus.h
@@ -71,6 +71,29 @@ typedef struct VirtioBusClass {
void (*device_unplugged)(DeviceState *d);
int (*query_nvectors)(DeviceState *d);
/*
+ * ioeventfd handling: if the transport implements ioeventfd_started,
+ * it must implement the other ioeventfd callbacks as well
+ */
+ /* Returns true if the ioeventfd has been started for the device. */
+ bool (*ioeventfd_started)(DeviceState *d);
+ /*
+ * Sets the 'ioeventfd started' state after the ioeventfd has been
+ * started/stopped for the device. err signifies whether an error
+ * had occurred.
+ */
+ void (*ioeventfd_set_started)(DeviceState *d, bool started, bool err);
+ /* Returns true if the ioeventfd has been disabled for the device. */
+ bool (*ioeventfd_disabled)(DeviceState *d);
+ /* Sets the 'ioeventfd disabled' state for the device. */
+ void (*ioeventfd_set_disabled)(DeviceState *d, bool disabled);
+ /*
+ * Assigns/deassigns the ioeventfd backing for the transport on
+ * the device for queue number n. Returns an error value on
+ * failure.
+ */
+ int (*ioeventfd_assign)(DeviceState *d, EventNotifier *notifier,
+ int n, bool assign);
+ /*
* Does the transport have variable vring alignment?
* (ie can it ever call virtio_queue_set_align()?)
* Note that changing this will break migration for this transport.
@@ -111,4 +134,11 @@ static inline VirtIODevice *virtio_bus_get_device(VirtioBusState *bus)
return (VirtIODevice *)qdev;
}
+/* Start the ioeventfd. */
+void virtio_bus_start_ioeventfd(VirtioBusState *bus);
+/* Stop the ioeventfd. */
+void virtio_bus_stop_ioeventfd(VirtioBusState *bus);
+/* Switch from/to the generic ioeventfd handler */
+int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign);
+
#endif /* VIRTIO_BUS_H */
--
2.6.5
* [Qemu-devel] [PATCH 2/6] virtio-bus: have callers tolerate new host notifier api
From: Cornelia Huck @ 2016-03-24 16:15 UTC (permalink / raw)
To: qemu-devel
Cc: famz, borntraeger, mst, tubo, stefanha, Cornelia Huck, pbonzini
Have vhost and dataplane use the new api for transports that
have been converted.
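The pattern at each call site below is the same: try the new bus-level
helper first, and fall back to the old per-transport callback for
transports that have not been converted yet (virtio_bus_set_host_notifier()
returns -ENOSYS in that case). Condensed:

    r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), n, assign);
    if (r == -ENOSYS) {
        /* transport not converted yet: use the old callback */
        r = k->set_host_notifier(qbus->parent, n, assign);
    }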
Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
---
hw/block/dataplane/virtio-blk.c | 14 +++++++++++---
hw/scsi/virtio-scsi-dataplane.c | 20 +++++++++++++++-----
hw/virtio/vhost.c | 20 ++++++++++++++++----
3 files changed, 42 insertions(+), 12 deletions(-)
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 36f3d2b..9ca41d7 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -132,7 +132,8 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
}
/* Don't try if transport does not support notifiers. */
- if (!k->set_guest_notifiers || !k->set_host_notifier) {
+ if (!k->set_guest_notifiers ||
+ (!k->set_host_notifier && !k->ioeventfd_started)) {
error_setg(errp,
"device is incompatible with dataplane "
"(transport does not support notifiers)");
@@ -209,7 +210,10 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
s->guest_notifier = virtio_queue_get_guest_notifier(s->vq);
/* Set up virtqueue notify */
- r = k->set_host_notifier(qbus->parent, 0, true);
+ r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), 0, true);
+ if (r == -ENOSYS) {
+ r = k->set_host_notifier(qbus->parent, 0, true);
+ }
if (r != 0) {
fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
goto fail_host_notifier;
@@ -244,6 +248,7 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
+ int r;
if (!vblk->dataplane_started || s->stopping) {
return;
@@ -268,7 +273,10 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
aio_context_release(s->ctx);
- k->set_host_notifier(qbus->parent, 0, false);
+ r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), 0, false);
+ if (r == -ENOSYS) {
+ k->set_host_notifier(qbus->parent, 0, false);
+ }
/* Clean up guest notifier (irq) */
k->set_guest_notifiers(qbus->parent, 1, false);
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 367e476..88c3abd 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -32,7 +32,8 @@ void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
s->ctx = iothread_get_aio_context(vs->conf.iothread);
/* Don't try if transport does not support notifiers. */
- if (!k->set_guest_notifiers || !k->set_host_notifier) {
+ if (!k->set_guest_notifiers ||
+ (!k->set_host_notifier && !k->ioeventfd_started)) {
fprintf(stderr, "virtio-scsi: Failed to set iothread "
"(transport does not support notifiers)");
exit(1);
@@ -46,7 +47,10 @@ static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n)
int rc;
/* Set up virtqueue notify */
- rc = k->set_host_notifier(qbus->parent, n, true);
+ rc = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), n, true);
+ if (rc == -ENOSYS) {
+ rc = k->set_host_notifier(qbus->parent, n, true);
+ }
if (rc != 0) {
fprintf(stderr, "virtio-scsi: Failed to set host notifier (%d)\n",
rc);
@@ -129,7 +133,10 @@ fail_vrings:
virtio_scsi_clear_aio(s);
aio_context_release(s->ctx);
for (i = 0; i < vs->conf.num_queues + 2; i++) {
- k->set_host_notifier(qbus->parent, i, false);
+ rc = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
+ if (rc == -ENOSYS) {
+ k->set_host_notifier(qbus->parent, i, false);
+ }
}
k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false);
fail_guest_notifiers:
@@ -144,7 +151,7 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
- int i;
+ int i, rc;
if (!s->dataplane_started || s->dataplane_stopping) {
return;
@@ -168,7 +175,10 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
aio_context_release(s->ctx);
for (i = 0; i < vs->conf.num_queues + 2; i++) {
- k->set_host_notifier(qbus->parent, i, false);
+ rc = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
+ if (rc == -ENOSYS) {
+ k->set_host_notifier(qbus->parent, i, false);
+ }
}
/* Clean up guest notifier (irq) */
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 392d848..4b8c151 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1113,14 +1113,18 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
VirtioBusState *vbus = VIRTIO_BUS(qbus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int i, r, e;
- if (!k->set_host_notifier) {
+ if (!k->set_host_notifier || !k->ioeventfd_started) {
fprintf(stderr, "binding does not support host notifiers\n");
r = -ENOSYS;
goto fail;
}
for (i = 0; i < hdev->nvqs; ++i) {
- r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
+ r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
+ true);
+ if (r == -ENOSYS) {
+ r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
+ }
if (r < 0) {
fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
goto fail_vq;
@@ -1130,7 +1134,11 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
return 0;
fail_vq:
while (--i >= 0) {
- e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
+ e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
+ false);
+ if (e == -ENOSYS) {
+ e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
+ }
if (e < 0) {
fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
fflush(stderr);
@@ -1154,7 +1162,11 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
int i, r;
for (i = 0; i < hdev->nvqs; ++i) {
- r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
+ r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
+ false);
+ if (r == -ENOSYS) {
+ r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
+ }
if (r < 0) {
fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
fflush(stderr);
--
2.6.5
* [Qemu-devel] [PATCH 3/6] virtio-ccw: convert to ioeventfd callbacks
From: Cornelia Huck @ 2016-03-24 16:15 UTC (permalink / raw)
To: qemu-devel
Cc: famz, borntraeger, mst, tubo, stefanha, Cornelia Huck, pbonzini
Use the new interface.
Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
---
hw/s390x/virtio-ccw.c | 133 +++++++++++++++++---------------------------------
1 file changed, 45 insertions(+), 88 deletions(-)
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index cb887ba..7574e0c 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -67,92 +67,58 @@ VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
return vdev;
}
-static int virtio_ccw_set_guest2host_notifier(VirtioCcwDevice *dev, int n,
- bool assign, bool set_handler)
+static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
- VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
- VirtQueue *vq = virtio_get_queue(vdev, n);
- EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
- int r = 0;
- SubchDev *sch = dev->sch;
- uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;
+ virtio_bus_start_ioeventfd(&dev->bus);
+}
- if (assign) {
- r = event_notifier_init(notifier, 1);
- if (r < 0) {
- error_report("%s: unable to init event notifier: %d", __func__, r);
- return r;
- }
- virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
- r = s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
- if (r < 0) {
- error_report("%s: unable to assign ioeventfd: %d", __func__, r);
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- event_notifier_cleanup(notifier);
- return r;
- }
- } else {
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
- event_notifier_cleanup(notifier);
- }
- return r;
+static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
+{
+ virtio_bus_stop_ioeventfd(&dev->bus);
}
-static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
+static bool virtio_ccw_ioeventfd_started(DeviceState *d)
{
- VirtIODevice *vdev;
- int n, r;
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
- if (!(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) ||
- dev->ioeventfd_disabled ||
- dev->ioeventfd_started) {
- return;
- }
- vdev = virtio_bus_get_device(&dev->bus);
- for (n = 0; n < VIRTIO_CCW_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = virtio_ccw_set_guest2host_notifier(dev, n, true, true);
- if (r < 0) {
- goto assign_error;
- }
- }
- dev->ioeventfd_started = true;
- return;
+ return dev->ioeventfd_started;
+}
- assign_error:
- while (--n >= 0) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = virtio_ccw_set_guest2host_notifier(dev, n, false, false);
- assert(r >= 0);
+static void virtio_ccw_ioeventfd_set_started(DeviceState *d, bool started,
+ bool err)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+
+ dev->ioeventfd_started = started;
+ if (err) {
+ /* Disable ioeventfd for this device. */
+ dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
}
- dev->ioeventfd_started = false;
- /* Disable ioeventfd for this device. */
- dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
- error_report("%s: failed. Fallback to userspace (slower).", __func__);
}
-static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
+static bool virtio_ccw_ioeventfd_disabled(DeviceState *d)
{
- VirtIODevice *vdev;
- int n, r;
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
- if (!dev->ioeventfd_started) {
- return;
- }
- vdev = virtio_bus_get_device(&dev->bus);
- for (n = 0; n < VIRTIO_CCW_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = virtio_ccw_set_guest2host_notifier(dev, n, false, false);
- assert(r >= 0);
- }
- dev->ioeventfd_started = false;
+ return dev->ioeventfd_disabled ||
+ !(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD);
+}
+
+static void virtio_ccw_ioeventfd_set_disabled(DeviceState *d, bool disabled)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+
+ dev->ioeventfd_disabled = disabled;
+}
+
+static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
+ int n, bool assign)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+ SubchDev *sch = dev->sch;
+ uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;
+
+ return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
}
VirtualCssBus *virtual_css_bus_init(void)
@@ -1193,19 +1159,6 @@ static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
}
-static int virtio_ccw_set_host_notifier(DeviceState *d, int n, bool assign)
-{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-
- /* Stop using the generic ioeventfd, we are doing eventfd handling
- * ourselves below */
- dev->ioeventfd_disabled = assign;
- if (assign) {
- virtio_ccw_stop_ioeventfd(dev);
- }
- return virtio_ccw_set_guest2host_notifier(dev, n, assign, false);
-}
-
static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
{
int r;
@@ -1834,7 +1787,6 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
k->notify = virtio_ccw_notify;
k->vmstate_change = virtio_ccw_vmstate_change;
k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
- k->set_host_notifier = virtio_ccw_set_host_notifier;
k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
k->save_queue = virtio_ccw_save_queue;
k->load_queue = virtio_ccw_load_queue;
@@ -1843,6 +1795,11 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
k->device_plugged = virtio_ccw_device_plugged;
k->post_plugged = virtio_ccw_post_plugged;
k->device_unplugged = virtio_ccw_device_unplugged;
+ k->ioeventfd_started = virtio_ccw_ioeventfd_started;
+ k->ioeventfd_set_started = virtio_ccw_ioeventfd_set_started;
+ k->ioeventfd_disabled = virtio_ccw_ioeventfd_disabled;
+ k->ioeventfd_set_disabled = virtio_ccw_ioeventfd_set_disabled;
+ k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}
static const TypeInfo virtio_ccw_bus_info = {
--
2.6.5
* [Qemu-devel] [PATCH 4/6] virtio-pci: convert to ioeventfd callbacks
From: Cornelia Huck @ 2016-03-24 16:15 UTC (permalink / raw)
To: qemu-devel
Cc: famz, borntraeger, mst, tubo, stefanha, Cornelia Huck, pbonzini
Convert to new interface.
Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
---
hw/virtio/virtio-pci.c | 124 ++++++++++++++++---------------------------------
1 file changed, 41 insertions(+), 83 deletions(-)
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 0dadb66..0bcb74f 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -261,14 +261,44 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
return 0;
}
+static bool virtio_pci_ioeventfd_started(DeviceState *d)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ return proxy->ioeventfd_started;
+}
+
+static void virtio_pci_ioeventfd_set_started(DeviceState *d, bool started,
+ bool err)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ proxy->ioeventfd_started = started;
+}
+
+static bool virtio_pci_ioeventfd_disabled(DeviceState *d)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ return proxy->ioeventfd_disabled ||
+ !(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD);
+}
+
+static void virtio_pci_ioeventfd_set_disabled(DeviceState *d, bool disabled)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ proxy->ioeventfd_disabled = disabled;
+}
+
#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
-static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
- int n, bool assign, bool set_handler)
+static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
+ int n, bool assign)
{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtQueue *vq = virtio_get_queue(vdev, n);
- EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
bool fast_mmio = kvm_ioeventfd_any_length_enabled();
@@ -279,16 +309,8 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
virtio_get_queue_index(vq);
hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
- int r = 0;
if (assign) {
- r = event_notifier_init(notifier, 1);
- if (r < 0) {
- error_report("%s: unable to init event notifier: %d",
- __func__, r);
- return r;
- }
- virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
if (modern) {
if (fast_mmio) {
memory_region_add_eventfd(modern_mr, modern_addr, 0,
@@ -324,68 +346,18 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
true, n, notifier);
}
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- event_notifier_cleanup(notifier);
}
- return r;
+ return 0;
}
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- int n, r;
-
- if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
- proxy->ioeventfd_disabled ||
- proxy->ioeventfd_started) {
- return;
- }
-
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
-
- r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
- if (r < 0) {
- goto assign_error;
- }
- }
- proxy->ioeventfd_started = true;
- return;
-
-assign_error:
- while (--n >= 0) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
-
- r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
- assert(r >= 0);
- }
- proxy->ioeventfd_started = false;
- error_report("%s: failed. Fallback to a userspace (slower).", __func__);
+ virtio_bus_start_ioeventfd(&proxy->bus);
}
static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- int r;
- int n;
-
- if (!proxy->ioeventfd_started) {
- return;
- }
-
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
-
- r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
- assert(r >= 0);
- }
- proxy->ioeventfd_started = false;
+ virtio_bus_stop_ioeventfd(&proxy->bus);
}
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
@@ -1111,24 +1083,6 @@ assign_error:
return r;
}
-static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
-{
- VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
-
- /* Stop using ioeventfd for virtqueue kick if the device starts using host
- * notifiers. This makes it easy to avoid stepping on each others' toes.
- */
- proxy->ioeventfd_disabled = assign;
- if (assign) {
- virtio_pci_stop_ioeventfd(proxy);
- }
- /* We don't need to start here: it's not needed because backend
- * currently only stops on status change away from ok,
- * reset, vmstop and such. If we do add code to start here,
- * need to check vmstate, device state etc. */
- return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
-}
-
static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
@@ -2489,12 +2443,16 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
k->load_extra_state = virtio_pci_load_extra_state;
k->has_extra_state = virtio_pci_has_extra_state;
k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
- k->set_host_notifier = virtio_pci_set_host_notifier;
k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
k->vmstate_change = virtio_pci_vmstate_change;
k->device_plugged = virtio_pci_device_plugged;
k->device_unplugged = virtio_pci_device_unplugged;
k->query_nvectors = virtio_pci_query_nvectors;
+ k->ioeventfd_started = virtio_pci_ioeventfd_started;
+ k->ioeventfd_set_started = virtio_pci_ioeventfd_set_started;
+ k->ioeventfd_disabled = virtio_pci_ioeventfd_disabled;
+ k->ioeventfd_set_disabled = virtio_pci_ioeventfd_set_disabled;
+ k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
}
static const TypeInfo virtio_pci_bus_info = {
--
2.6.5
* [Qemu-devel] [PATCH 5/6] virtio-mmio: convert to ioeventfd callbacks
From: Cornelia Huck @ 2016-03-24 16:15 UTC (permalink / raw)
To: qemu-devel
Cc: famz, borntraeger, mst, tubo, stefanha, Cornelia Huck, pbonzini
Convert to the new interface.
Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
---
hw/virtio/virtio-mmio.c | 128 ++++++++++++++++--------------------------------
1 file changed, 41 insertions(+), 87 deletions(-)
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index d4cd91f..eb84b74 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -93,90 +93,59 @@ typedef struct {
bool ioeventfd_started;
} VirtIOMMIOProxy;
-static int virtio_mmio_set_host_notifier_internal(VirtIOMMIOProxy *proxy,
- int n, bool assign,
- bool set_handler)
+static bool virtio_mmio_ioeventfd_started(DeviceState *d)
{
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- VirtQueue *vq = virtio_get_queue(vdev, n);
- EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
- int r = 0;
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
- if (assign) {
- r = event_notifier_init(notifier, 1);
- if (r < 0) {
- error_report("%s: unable to init event notifier: %d",
- __func__, r);
- return r;
- }
- virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
- memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
- true, n, notifier);
- } else {
- memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
- true, n, notifier);
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- event_notifier_cleanup(notifier);
- }
- return r;
+ return proxy->ioeventfd_started;
}
-static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
+static void virtio_mmio_ioeventfd_set_started(DeviceState *d, bool started,
+ bool err)
{
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- int n, r;
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
- if (!kvm_eventfds_enabled() ||
- proxy->ioeventfd_disabled ||
- proxy->ioeventfd_started) {
- return;
- }
+ proxy->ioeventfd_started = started;
+}
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
+static bool virtio_mmio_ioeventfd_disabled(DeviceState *d)
+{
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
- r = virtio_mmio_set_host_notifier_internal(proxy, n, true, true);
- if (r < 0) {
- goto assign_error;
- }
- }
- proxy->ioeventfd_started = true;
- return;
+ return !kvm_eventfds_enabled() || proxy->ioeventfd_disabled;
+}
-assign_error:
- while (--n >= 0) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
+static void virtio_mmio_ioeventfd_set_disabled(DeviceState *d, bool disabled)
+{
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
- r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
- assert(r >= 0);
- }
- proxy->ioeventfd_started = false;
- error_report("%s: failed. Fallback to a userspace (slower).", __func__);
+ proxy->ioeventfd_disabled = disabled;
}
-static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
+static int virtio_mmio_ioeventfd_assign(DeviceState *d,
+ EventNotifier *notifier,
+ int n, bool assign)
{
- int r;
- int n;
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
- if (!proxy->ioeventfd_started) {
- return;
+ if (assign) {
+ memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
+ true, n, notifier);
+ } else {
+ memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
+ true, n, notifier);
}
+ return 0;
+}
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
+static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
+{
+ virtio_bus_start_ioeventfd(&proxy->bus);
+}
- r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
- assert(r >= 0);
- }
- proxy->ioeventfd_started = false;
+static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
+{
+ virtio_bus_stop_ioeventfd(&proxy->bus);
}
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
@@ -498,25 +467,6 @@ assign_error:
return r;
}
-static int virtio_mmio_set_host_notifier(DeviceState *opaque, int n,
- bool assign)
-{
- VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
-
- /* Stop using ioeventfd for virtqueue kick if the device starts using host
- * notifiers. This makes it easy to avoid stepping on each others' toes.
- */
- proxy->ioeventfd_disabled = assign;
- if (assign) {
- virtio_mmio_stop_ioeventfd(proxy);
- }
- /* We don't need to start here: it's not needed because backend
- * currently only stops on status change away from ok,
- * reset, vmstop and such. If we do add code to start here,
- * need to check vmstate, device state etc. */
- return virtio_mmio_set_host_notifier_internal(proxy, n, assign, false);
-}
-
/* virtio-mmio device */
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
@@ -558,8 +508,12 @@ static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
k->notify = virtio_mmio_update_irq;
k->save_config = virtio_mmio_save_config;
k->load_config = virtio_mmio_load_config;
- k->set_host_notifier = virtio_mmio_set_host_notifier;
k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
+ k->ioeventfd_started = virtio_mmio_ioeventfd_started;
+ k->ioeventfd_set_started = virtio_mmio_ioeventfd_set_started;
+ k->ioeventfd_disabled = virtio_mmio_ioeventfd_disabled;
+ k->ioeventfd_set_disabled = virtio_mmio_ioeventfd_set_disabled;
+ k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
k->has_variable_vring_alignment = true;
bus_class->max_dev = 1;
}
--
2.6.5
* [Qemu-devel] [PATCH 6/6] virtio-bus: remove old set_host_notifier callback
From: Cornelia Huck @ 2016-03-24 16:15 UTC (permalink / raw)
To: qemu-devel
Cc: famz, borntraeger, mst, tubo, stefanha, Cornelia Huck, pbonzini
All users have been converted to the new ioeventfd callbacks.
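With the fallback gone, every call site below reduces to a single call:

    r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), n, assign);

and the -ENOSYS check introduced in the previous patch disappears.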
Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
---
hw/block/dataplane/virtio-blk.c | 12 ++----------
hw/scsi/virtio-scsi-dataplane.c | 19 ++++---------------
hw/virtio/vhost.c | 13 +------------
include/hw/virtio/virtio-bus.h | 1 -
4 files changed, 7 insertions(+), 38 deletions(-)
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 9ca41d7..1b2d5fa 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -132,8 +132,7 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
}
/* Don't try if transport does not support notifiers. */
- if (!k->set_guest_notifiers ||
- (!k->set_host_notifier && !k->ioeventfd_started)) {
+ if (!k->set_guest_notifiers || !k->ioeventfd_started) {
error_setg(errp,
"device is incompatible with dataplane "
"(transport does not support notifiers)");
@@ -211,9 +210,6 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
/* Set up virtqueue notify */
r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), 0, true);
- if (r == -ENOSYS) {
- r = k->set_host_notifier(qbus->parent, 0, true);
- }
if (r != 0) {
fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
goto fail_host_notifier;
@@ -248,7 +244,6 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
- int r;
if (!vblk->dataplane_started || s->stopping) {
return;
@@ -273,10 +268,7 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
aio_context_release(s->ctx);
- r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), 0, false);
- if (r == -ENOSYS) {
- k->set_host_notifier(qbus->parent, 0, false);
- }
+ virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), 0, false);
/* Clean up guest notifier (irq) */
k->set_guest_notifiers(qbus->parent, 1, false);
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 88c3abd..88773d2 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -32,8 +32,7 @@ void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
s->ctx = iothread_get_aio_context(vs->conf.iothread);
/* Don't try if transport does not support notifiers. */
- if (!k->set_guest_notifiers ||
- (!k->set_host_notifier && !k->ioeventfd_started)) {
+ if (!k->set_guest_notifiers || !k->ioeventfd_started) {
fprintf(stderr, "virtio-scsi: Failed to set iothread "
"(transport does not support notifiers)");
exit(1);
@@ -43,14 +42,10 @@ void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
int rc;
/* Set up virtqueue notify */
rc = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), n, true);
- if (rc == -ENOSYS) {
- rc = k->set_host_notifier(qbus->parent, n, true);
- }
if (rc != 0) {
fprintf(stderr, "virtio-scsi: Failed to set host notifier (%d)\n",
rc);
@@ -133,10 +128,7 @@ fail_vrings:
virtio_scsi_clear_aio(s);
aio_context_release(s->ctx);
for (i = 0; i < vs->conf.num_queues + 2; i++) {
- rc = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
- if (rc == -ENOSYS) {
- k->set_host_notifier(qbus->parent, i, false);
- }
+ virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
}
k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false);
fail_guest_notifiers:
@@ -151,7 +143,7 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
- int i, rc;
+ int i;
if (!s->dataplane_started || s->dataplane_stopping) {
return;
@@ -175,10 +167,7 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
aio_context_release(s->ctx);
for (i = 0; i < vs->conf.num_queues + 2; i++) {
- rc = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
- if (rc == -ENOSYS) {
- k->set_host_notifier(qbus->parent, i, false);
- }
+ virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
}
/* Clean up guest notifier (irq) */
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 4b8c151..f0e3a25 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1113,7 +1113,7 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
VirtioBusState *vbus = VIRTIO_BUS(qbus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int i, r, e;
- if (!k->set_host_notifier || !k->ioeventfd_started) {
+ if (!k->ioeventfd_started) {
fprintf(stderr, "binding does not support host notifiers\n");
r = -ENOSYS;
goto fail;
@@ -1122,9 +1122,6 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
for (i = 0; i < hdev->nvqs; ++i) {
r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
true);
- if (r == -ENOSYS) {
- r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
- }
if (r < 0) {
fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
goto fail_vq;
@@ -1136,9 +1133,6 @@ fail_vq:
while (--i >= 0) {
e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
false);
- if (e == -ENOSYS) {
- e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
- }
if (e < 0) {
fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
fflush(stderr);
@@ -1157,16 +1151,11 @@ fail:
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
- VirtioBusState *vbus = VIRTIO_BUS(qbus);
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int i, r;
for (i = 0; i < hdev->nvqs; ++i) {
r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
false);
- if (r == -ENOSYS) {
- r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
- }
if (r < 0) {
fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
fflush(stderr);
diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h
index 9637f80..f3e5ef3 100644
--- a/include/hw/virtio/virtio-bus.h
+++ b/include/hw/virtio/virtio-bus.h
@@ -52,7 +52,6 @@ typedef struct VirtioBusClass {
bool (*has_extra_state)(DeviceState *d);
bool (*query_guest_notifiers)(DeviceState *d);
int (*set_guest_notifiers)(DeviceState *d, int nvqs, bool assign);
- int (*set_host_notifier)(DeviceState *d, int n, bool assigned);
void (*vmstate_change)(DeviceState *d, bool running);
/*
* transport independent init function.
--
2.6.5
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
From: Paolo Bonzini @ 2016-03-24 17:06 UTC (permalink / raw)
To: Cornelia Huck, qemu-devel; +Cc: tubo, borntraeger, famz, stefanha, mst
On 24/03/2016 17:15, Cornelia Huck wrote:
> Here's the next version of my refactoring of the virtio host notifiers.
> This one actually survives a bit of testing for me (reboot loop).
>
> As this patchset fixes a latent bug exposed by the recent dataplane
> changes (we have a deassigned ioeventfd for a short period of time
> during dataplane start, which leads to the virtqueue handler being
> called in both the vcpu thread and the iothread simultaneously), I'd
> like to see this in 2.6.
Tested-by: Paolo Bonzini <pbonzini@redhat.com>
It survived 6 minutes with your patches, versus 10 seconds before. At
about 2.5 seconds per reboot, that means the failure used to hit around
the fourth reboot (10 s / 2.5 s), while it now survived roughly 150
reboots (360 s / 2.5 s).
My test case was to add "systemd.unit=reboot.target" to a Fedora 21
guest's kernel command line and run the following:
./+build/x86_64-softmmu/qemu-system-x86_64 --enable-kvm -m 512 \
-smp 4 -serial mon:stdio -display none -object iothread,id=io \
-drive if=none,id=hd,file=/vm/virt_test/images/jeos-21-64.qcow2 \
-device virtio-blk-pci,drive=hd,iothread=io \
-drive if=none,file=null-co://,id=n1 \
-drive if=none,file=null-co://,id=n2 \
-drive if=none,file=null-co://,id=n3 \
-drive if=none,file=null-co://,id=n4 \
-drive if=none,file=null-co://,id=n5 \
-drive if=none,file=null-co://,id=n6 \
-drive if=none,file=null-co://,id=n7 \
-drive if=none,file=null-co://,id=n8 \
-device virtio-blk-pci,iothread=io,drive=n1 \
-device virtio-blk-pci,iothread=io,drive=n2 \
-device virtio-blk-pci,iothread=io,drive=n3 \
-device virtio-blk-pci,iothread=io,drive=n4 \
-device virtio-blk-pci,iothread=io,drive=n5 \
-device virtio-blk-pci,iothread=io,drive=n6 \
-device virtio-blk-pci,iothread=io,drive=n7 \
-device virtio-blk-pci,iothread=io,drive=n8
with the assertion patch applied:
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index cb710f1..d0b8248 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -591,6 +591,7 @@
return;
}
+ assert(atomic_fetch_inc(&s->reentrancy_test) == 0);
blk_io_plug(s->blk);
while ((req = virtio_blk_get_request(s))) {
@@ -602,6 +603,7 @@
}
blk_io_unplug(s->blk);
+ atomic_dec(&s->reentrancy_test);
}
static void virtio_blk_dma_restart_bh(void *opaque)
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index ae84d92..5cb66cd 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -54,6 +54,7 @@ typedef struct VirtIOBlock {
bool original_wce;
VMChangeStateEntry *change;
bool dataplane_started;
+ int reentrancy_test;
struct VirtIOBlockDataPlane *dataplane;
} VirtIOBlock;
Thanks for your help!
Paolo
> Changes from RFC:
> - Fixed some silly errors (checking for !disabled instead of disabled,
> virtio_ccw_stop_ioeventfd() calling virtio_bus_start_ioeventfd()).
> - Completely reworked set_host_notifier(): We only want to set/unset
> the actual handler function and don't want to do anything to the
> ioeventfd backing, so reduce the function to actually doing only
> that.
> - With the change above, we can lose the 'assign' parameter in
> virtio_bus_stop_ioeventfd() again.
> - Added more comments that hopefully make it clearer what is going on.
>
> I'd appreciate it if people could give it some testing; I'll be back
> to look at the fallout after Easter.
>
> Cornelia Huck (6):
> virtio-bus: common ioeventfd infrastructure
> virtio-bus: have callers tolerate new host notifier api
> virtio-ccw: convert to ioeventfd callbacks
> virtio-pci: convert to ioeventfd callbacks
> virtio-mmio: convert to ioeventfd callbacks
> virtio-bus: remove old set_host_notifier callback
>
> hw/block/dataplane/virtio-blk.c | 6 +-
> hw/s390x/virtio-ccw.c | 133 ++++++++++++++--------------------------
> hw/scsi/virtio-scsi-dataplane.c | 9 ++-
> hw/virtio/vhost.c | 13 ++--
> hw/virtio/virtio-bus.c | 132 +++++++++++++++++++++++++++++++++++++++
> hw/virtio/virtio-mmio.c | 128 +++++++++++++-------------------------
> hw/virtio/virtio-pci.c | 124 +++++++++++++------------------------
> include/hw/virtio/virtio-bus.h | 31 +++++++++-
> 8 files changed, 303 insertions(+), 273 deletions(-)
>
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
From: Cornelia Huck @ 2016-03-29 8:18 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: famz, borntraeger, mst, qemu-devel, tubo, stefanha
On Thu, 24 Mar 2016 18:06:21 +0100
Paolo Bonzini <pbonzini@redhat.com> wrote:
> On 24/03/2016 17:15, Cornelia Huck wrote:
> > Here's the next version of my refactoring of the virtio host notifiers.
> > This one actually survives a bit of testing for me (reboot loop).
> >
> > As this patchset fixes a latent bug exposed by the recent dataplane
> > changes (we have a deassigned ioeventfd for a short period of time
> > during dataplane start, which leads to the virtqueue handler being
> > called in both the vcpu thread and the iothread simultaneously), I'd
> > like to see this in 2.6.
>
> Tested-by: Paolo Bonzini <pbonzini@redhat.com>
>
> It survived 6 minutes with your patches, versus 10 seconds before. At
> about 2.5 seconds per reboot, that means the failure used to hit around
> the fourth reboot (10 s / 2.5 s), while it now survived roughly 150
> reboots (360 s / 2.5 s).
Thanks for testing!
Is the failure still the same? I thought I had understood the problem
by now, and I'm wondering which hole we're still missing.
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
From: Paolo Bonzini @ 2016-03-29 9:15 UTC (permalink / raw)
To: Cornelia Huck; +Cc: famz, borntraeger, mst, qemu-devel, tubo, stefanha
On 29/03/2016 10:18, Cornelia Huck wrote:
>> >
>> > Tested-by: Paolo Bonzini <pbonzini@redhat.com>
>> >
>> > It survived 6 minutes with your patches, versus 10 seconds before. At
>> > about 2.5 seconds per reboot, that means the failure used to hit around
>> > the fourth reboot (10 s / 2.5 s), while it now survived roughly 150
>> > reboots (360 s / 2.5 s).
>
> Thanks for testing!
>
> Is the failure still the same? I thought I had understood the problem
> by now, and I'm wondering which hole we're still missing.
No, I just had to leave after the 6 minutes so I turned it off. :)
Paolo
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
From: Fam Zheng @ 2016-03-25 9:52 UTC (permalink / raw)
To: Cornelia Huck; +Cc: tubo, mst, qemu-devel, borntraeger, stefanha, pbonzini
On Thu, 03/24 17:15, Cornelia Huck wrote:
> Here's the next version of my refactoring of the virtio host notifiers.
> This one actually survives a bit of testing for me (reboot loop).
>
> As this patchset fixes a latent bug exposed by the recent dataplane
> changes (we have a deassigned ioeventfd for a short period of time
> during dataplane start, which leads to the virtqueue handler being
> called in both the vcpu thread and the iothread simultaneously), I'd
> like to see this in 2.6.
>
> Changes from RFC:
> - Fixed some silly errors (checking for !disabled instead of disabled,
> virtio_ccw_stop_ioeventfd() calling virtio_bus_start_ioeventfd()).
> - Completely reworked set_host_notifier(): We only want to set/unset
> the actual handler function and don't want to do anything to the
> ioeventfd backing, so reduce the function to actually doing only
> that.
> - With the change above, we can lose the 'assign' parameter in
> virtio_bus_stop_ioeventfd() again.
> - Added more comments that hopefully make it clearer what is going on.
>
> I'd appreciate it if people could give it some testing; I'll be back
> to look at the fallout after Easter.
Tested-by: Fam Zheng <famz@redhat.com>
>
> Cornelia Huck (6):
> virtio-bus: common ioeventfd infrastructure
> virtio-bus: have callers tolerate new host notifier api
> virtio-ccw: convert to ioeventfd callbacks
> virtio-pci: convert to ioeventfd callbacks
> virtio-mmio: convert to ioeventfd callbacks
> virtio-bus: remove old set_host_notifier callback
>
> hw/block/dataplane/virtio-blk.c | 6 +-
> hw/s390x/virtio-ccw.c | 133 ++++++++++++++--------------------------
> hw/scsi/virtio-scsi-dataplane.c | 9 ++-
> hw/virtio/vhost.c | 13 ++--
> hw/virtio/virtio-bus.c | 132 +++++++++++++++++++++++++++++++++++++++
> hw/virtio/virtio-mmio.c | 128 +++++++++++++-------------------------
> hw/virtio/virtio-pci.c | 124 +++++++++++++------------------------
> include/hw/virtio/virtio-bus.h | 31 +++++++++-
> 8 files changed, 303 insertions(+), 273 deletions(-)
>
> --
> 2.6.5
>
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
From: TU BO @ 2016-03-28 3:55 UTC (permalink / raw)
To: Cornelia Huck, qemu-devel; +Cc: pbonzini, famz, borntraeger, stefanha, mst
Hi Cornelia:
I got two crashes with qemu master + "[PATCH 0/6] virtio: refactor host
notifiers". I can reproduce the first crash very often.
(gdb) bt
#0 blk_aio_read_entry (opaque=0x0) at block/block-backend.c:922
#1 0x000002aa17a65f0e in coroutine_trampoline (i0=<optimized out>,
i1=-1677713216) at util/coroutine-ucontext.c:78
#2 0x000003ffabfd150a in __makecontext_ret () from /lib64/libc.so.6
(gdb) list
917 static void blk_aio_read_entry(void *opaque)
918 {
919 BlkAioEmAIOCB *acb = opaque;
920 BlkRwCo *rwco = &acb->rwco;
921
922 rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
923 rwco->qiov, rwco->flags);
924 blk_aio_complete(acb);
925 }
For the 2nd crash, I saw it just once and could not reproduce it later.
(gdb) bt
#0 ioq_submit (s=s@entry=0x2aa3fe2cc80) at block/linux-aio.c:197
#1 0x000002aa3f645b36 in qemu_laio_completion_bh (opaque=0x2aa3fe2cc80)
at block/linux-aio.c:143
#2 0x000002aa3f5ffbf0 in aio_bh_call (bh=<optimized out>) at async.c:65
#3 aio_bh_poll (ctx=0x2aa3fdf7e00) at async.c:93
#4 0x000002aa3f60a51e in aio_dispatch (ctx=ctx@entry=0x2aa3fdf7e00) at
aio-posix.c:306
#5 0x000002aa3f60a7ca in aio_poll (ctx=0x2aa3fdf7e00,
blocking=<optimized out>) at aio-posix.c:475
#6 0x000002aa3f53903c in iothread_run (opaque=0x2aa3fdf7220) at
iothread.c:46
#7 0x000003ffa86084c6 in start_thread () from /lib64/libpthread.so.0
#8 0x000003ffa7c82ec2 in thread_start () from /lib64/libc.so.6
(gdb) list
192 struct iocb *iocbs[MAX_QUEUED_IO];
193 QSIMPLEQ_HEAD(, qemu_laiocb) completed;
194
195 do {
196 len = 0;
197 QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
198 iocbs[len++] = &aiocb->iocb;
199 if (len == MAX_QUEUED_IO) {
200 break;
201 }
On 2016-03-25 12:15 AM, Cornelia Huck wrote:
> Here's the next version of my refactoring of the virtio host notifiers.
> This one actually survives a bit of testing for me (reboot loop).
>
> As this patchset fixes a latent bug exposed by the recent dataplane
> changes (we have a deassigned ioeventfd for a short period of time
> during dataplane start, which leads to the virtqueue handler being
> called in both the vcpu thread and the iothread simultaneously), I'd
> like to see this in 2.6.
>
> Changes from RFC:
> - Fixed some silly errors (checking for !disabled instead of disabled,
> virtio_ccw_stop_ioeventfd() calling virtio_bus_start_ioeventfd()).
> - Completely reworked set_host_notifier(): We only want to set/unset
> the actual handler function and don't want to do anything to the
> ioeventfd backing, so reduce the function to actually doing only
> that.
> - With the change above, we can lose the 'assign' parameter in
> virtio_bus_stop_ioeventfd() again.
> - Added more comments that hopefully make it clearer what is going on.
>
> I'd appreciate it if people could give it some testing; I'll be back
> to look at the fallout after Easter.
>
> Cornelia Huck (6):
> virtio-bus: common ioeventfd infrastructure
> virtio-bus: have callers tolerate new host notifier api
> virtio-ccw: convert to ioeventfd callbacks
> virtio-pci: convert to ioeventfd callbacks
> virtio-mmio: convert to ioeventfd callbacks
> virtio-bus: remove old set_host_notifier callback
>
> hw/block/dataplane/virtio-blk.c | 6 +-
> hw/s390x/virtio-ccw.c | 133 ++++++++++++++--------------------------
> hw/scsi/virtio-scsi-dataplane.c | 9 ++-
> hw/virtio/vhost.c | 13 ++--
> hw/virtio/virtio-bus.c | 132 +++++++++++++++++++++++++++++++++++++++
> hw/virtio/virtio-mmio.c | 128 +++++++++++++-------------------------
> hw/virtio/virtio-pci.c | 124 +++++++++++++------------------------
> include/hw/virtio/virtio-bus.h | 31 +++++++++-
> 8 files changed, 303 insertions(+), 273 deletions(-)
>
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
From: Paolo Bonzini @ 2016-03-28 18:11 UTC (permalink / raw)
To: TU BO, Cornelia Huck, qemu-devel; +Cc: borntraeger, famz, stefanha, mst
On 28/03/2016 05:55, TU BO wrote:
> Hi Cornelia:
>
> I got two crashes with qemu master + "[PATCH 0/6] virtio: refactor host
> notifiers".
Hi Tu Bo,
please always include the assertion patch at
https://lists.gnu.org/archive/html/qemu-block/2016-03/msg00546.html in
your tests. Can you include the backtrace from all threads with that patch?
Thanks,
Paolo
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
From: tu bo @ 2016-03-29 9:14 UTC (permalink / raw)
To: Paolo Bonzini, Cornelia Huck, qemu-devel; +Cc: borntraeger, famz, stefanha, mst
Hi Paolo:
On 03/29/2016 02:11 AM, Paolo Bonzini wrote:
> On 28/03/2016 05:55, TU BO wrote:
>> Hi Cornelia:
>>
>> I got two crashes with qemu master + "[PATCH 0/6] virtio: refactor host
>> notifiers".
>
> Hi Tu Bo,
>
> please always include the assertion patch at
> https://lists.gnu.org/archive/html/qemu-block/2016-03/msg00546.html in
> your tests. Can you include the backtrace from all threads with that patch?
>
Thanks for the reminder about the assertion patch. Here are the
backtraces with qemu master + assertion patch + "[PATCH 0/6] virtio:
refactor host notifiers". I got two crashes.
1. For the 1st crash:
(gdb) thread apply all bt
Thread 8 (Thread 0x3ff8daf1910 (LWP 52859)):
#0 0x000003ff9718ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
#1 0x000003ff9718ed76 in sem_timedwait () from /lib64/libpthread.so.0
#2 0x000002aa2d755868 in qemu_sem_timedwait (sem=0x3ff88000fa8,
ms=<optimized out>) at util/qemu-thread-posix.c:245
#3 0x000002aa2d6803e4 in worker_thread (opaque=0x3ff88000f40) at
thread-pool.c:92
#4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
#5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
Thread 7 (Thread 0x3ff8e679910 (LWP 52856)):
#0 0x000003ff9718ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
#1 0x000003ff9718ed76 in sem_timedwait () from /lib64/libpthread.so.0
#2 0x000002aa2d755868 in qemu_sem_timedwait (sem=0x2aa2e1fbfa8,
ms=<optimized out>) at util/qemu-thread-posix.c:245
#3 0x000002aa2d6803e4 in worker_thread (opaque=0x2aa2e1fbf40) at
thread-pool.c:92
#4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
#5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
Thread 6 (Thread 0x3ff9497f910 (LWP 52850)):
#0 0x000003ff9718c50e in pthread_cond_wait@@GLIBC_2.3.2 () from
/lib64/libpthread.so.0
#1 0x000003ff96d19792 in g_cond_wait () from /lib64/libglib-2.0.so.0
#2 0x000002aa2d7165d2 in wait_for_trace_records_available () at
trace/simple.c:147
---Type <return> to continue, or q <return> to quit---
#3 writeout_thread (opaque=<optimized out>) at trace/simple.c:165
#4 0x000003ff96cfa44c in g_thread_proxy () from /lib64/libglib-2.0.so.0
#5 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
#6 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
Thread 5 (Thread 0x3ff8efff910 (LWP 52855)):
#0 0x000003ff967f819a in ioctl () from /lib64/libc.so.6
#1 0x000002aa2d546f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa2e239030,
type=type@entry=44672)
at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
#2 0x000002aa2d54701e in kvm_cpu_exec (cpu=0x2aa2e239030) at
/usr/src/debug/qemu-2.5.50/kvm-all.c:1834
#3 0x000002aa2d533cd6 in qemu_kvm_cpu_thread_fn (arg=<optimized out>)
at /usr/src/debug/qemu-2.5.50/cpus.c:1056
#4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
#5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
Thread 4 (Thread 0x3ff951ff910 (LWP 52849)):
#0 0x000003ff967fcf56 in syscall () from /lib64/libc.so.6
#1 0x000002aa2d755a36 in futex_wait (val=<optimized out>, ev=<optimized
out>) at util/qemu-thread-posix.c:292
#2 qemu_event_wait (ev=0x2aa2ddb5914 <rcu_call_ready_event>) at
util/qemu-thread-posix.c:399
#3 0x000002aa2d765002 in call_rcu_thread (opaque=<optimized out>) at
util/rcu.c:250
#4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
#5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
---Type <return> to continue, or q <return> to quit---
Thread 3 (Thread 0x3ff978e0bf0 (LWP 52845)):
#0 0x000003ff967f66e6 in ppoll () from /lib64/libc.so.6
#1 0x000002aa2d68928e in ppoll (__ss=0x0, __timeout=0x0,
__nfds=<optimized out>, __fds=<optimized out>) at
/usr/include/bits/poll2.h:77
#2 qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=-1)
at qemu-timer.c:313
#3 0x000002aa2d688b02 in os_host_main_loop_wait (timeout=-1) at
main-loop.c:251
#4 main_loop_wait (nonblocking=<optimized out>) at main-loop.c:505
#5 0x000002aa2d4faade in main_loop () at vl.c:1933
#6 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized
out>) at vl.c:4646
Thread 2 (Thread 0x3ff8ffff910 (LWP 52851)):
#0 0x000003ff967f66e6 in ppoll () from /lib64/libc.so.6
#1 0x000002aa2d68928e in ppoll (__ss=0x0, __timeout=0x0,
__nfds=<optimized out>, __fds=<optimized out>) at
/usr/include/bits/poll2.h:77
#2 qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=-1)
at qemu-timer.c:313
#3 0x000002aa2d68a788 in aio_poll (ctx=0x2aa2de77e00,
blocking=<optimized out>) at aio-posix.c:453
#4 0x000002aa2d5b909c in iothread_run (opaque=0x2aa2de77220) at
iothread.c:46
#5 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
#6 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
Thread 1 (Thread 0x3ff8f7ff910 (LWP 52854)):
#0 0x000003ff9673b650 in raise () from /lib64/libc.so.6
---Type <return> to continue, or q <return> to quit---
#1 0x000003ff9673ced8 in abort () from /lib64/libc.so.6
#2 0x000003ff96733666 in __assert_fail_base () from /lib64/libc.so.6
#3 0x000003ff967336f4 in __assert_fail () from /lib64/libc.so.6
#4 0x000002aa2d562608 in virtio_blk_handle_output (vdev=<optimized
out>, vq=<optimized out>)
at /usr/src/debug/qemu-2.5.50/hw/block/virtio-blk.c:595
#5 0x000002aa2d587464 in virtio_ccw_hcall_notify (args=<optimized out>)
at /usr/src/debug/qemu-2.5.50/hw/s390x/s390-virtio-ccw.c:64
#6 0x000002aa2d58236c in s390_virtio_hypercall (env=0x2aa2e205660) at
/usr/src/debug/qemu-2.5.50/hw/s390x/s390-virtio-hcall.c:35
#7 0x000002aa2d5b0920 in handle_hypercall (run=<optimized out>,
cpu=0x2aa2e1fd390) at /usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:1258
#8 handle_diag (ipb=<optimized out>, run=0x3ff94080000,
cpu=0x2aa2e1fd390) at /usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:1327
#9 handle_instruction (run=0x3ff94080000, cpu=0x2aa2e1fd390) at
/usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:1774
#10 handle_intercept (cpu=0x2aa2e1fd390) at
/usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:1817
#11 kvm_arch_handle_exit (cs=<optimized out>, run=<optimized out>) at
/usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:2003
#12 0x000002aa2d547072 in kvm_cpu_exec (cpu=0x2aa2e1fd390) at
/usr/src/debug/qemu-2.5.50/kvm-all.c:1921
#13 0x000002aa2d533cd6 in qemu_kvm_cpu_thread_fn (arg=<optimized out>)
at /usr/src/debug/qemu-2.5.50/cpus.c:1056
#14 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
#15 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
2. For the 2nd crash:
(gdb) thread apply all bt
Thread 10 (Thread 0x3ffacdff910 (LWP 52818)):
#0 0x000003ffafcf819a in ioctl () from /lib64/libc.so.6
#1 0x000002aa090c6f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa09d7d390,
type=type@entry=44672) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
#2 0x000002aa090c701e in kvm_cpu_exec (cpu=0x2aa09d7d390) at
/usr/src/debug/qemu-2.5.50/kvm-all.c:1834
#3 0x000002aa090b3cd6 in qemu_kvm_cpu_thread_fn (arg=0x2aa09d7d390) at
/usr/src/debug/qemu-2.5.50/cpus.c:1056
#4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
#5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
Thread 9 (Thread 0x3ff5feff910 (LWP 52819)):
#0 0x000003ffafcf819a in ioctl () from /lib64/libc.so.6
#1 0x000002aa090c6f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa09db91d0,
type=type@entry=44672) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
#2 0x000002aa090c701e in kvm_cpu_exec (cpu=0x2aa09db91d0) at
/usr/src/debug/qemu-2.5.50/kvm-all.c:1834
#3 0x000002aa090b3cd6 in qemu_kvm_cpu_thread_fn (arg=0x2aa09db91d0) at
/usr/src/debug/qemu-2.5.50/cpus.c:1056
#4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
#5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
Thread 8 (Thread 0x3ff5e6ff910 (LWP 52822)):
#0 0x000003ffb068ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
#1 0x000003ffb068ed76 in sem_timedwait () from /lib64/libpthread.so.0
#2 0x000002aa092d5868 in qemu_sem_timedwait (sem=0x2aa09dfc288,
ms=<optimized out>) at util/qemu-thread-posix.c:245
#3 0x000002aa092003e4 in worker_thread (opaque=0x2aa09dfc220) at
thread-pool.c:92
#4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
#5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
Thread 7 (Thread 0x3ffade7f910 (LWP 52814)):
#0 0x000003ffb068c50e in pthread_cond_wait@@GLIBC_2.3.2 () from
/lib64/libpthread.so.0
#1 0x000003ffb0219792 in g_cond_wait () from /lib64/libglib-2.0.so.0
#2 0x000002aa092965d2 in wait_for_trace_records_available () at
trace/simple.c:147
#3 writeout_thread (opaque=<optimized out>) at trace/simple.c:165
#4 0x000003ffb01fa44c in g_thread_proxy () from /lib64/libglib-2.0.so.0
#5 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
#6 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
Thread 6 (Thread 0x3ff5dcfd910 (LWP 52824)):
#0 0x000003ffb068ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
#1 0x000003ffb068ed76 in sem_timedwait () from /lib64/libpthread.so.0
#2 0x000002aa092d5868 in qemu_sem_timedwait (sem=0x3ffa0000fa8,
ms=<optimized out>) at util/qemu-thread-posix.c:245
#3 0x000002aa092003e4 in worker_thread (opaque=0x3ffa0000f40) at
thread-pool.c:92
#4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
#5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
Thread 5 (Thread 0x3ffae6ff910 (LWP 52813)):
#0 0x000003ffafcfcf56 in syscall () from /lib64/libc.so.6
#1 0x000002aa092d5a36 in futex_wait (val=<optimized out>, ev=<optimized
out>) at util/qemu-thread-posix.c:292
#2 qemu_event_wait (ev=0x2aa09935914 <rcu_call_ready_event>) at
util/qemu-thread-posix.c:399
#3 0x000002aa092e5002 in call_rcu_thread (opaque=<optimized out>) at
util/rcu.c:250
#4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
#5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
Thread 4 (Thread 0x3ff5f6ff910 (LWP 52820)):
#0 0x000003ffafcf819a in ioctl () from /lib64/libc.so.6
#1 0x000002aa090c6f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa09dcb490,
type=type@entry=44672) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
#2 0x000002aa090c701e in kvm_cpu_exec (cpu=0x2aa09dcb490) at
/usr/src/debug/qemu-2.5.50/kvm-all.c:1834
#3 0x000002aa090b3cd6 in qemu_kvm_cpu_thread_fn (arg=0x2aa09dcb490) at
/usr/src/debug/qemu-2.5.50/cpus.c:1056
#4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
#5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
---Type <return> to continue, or q <return> to quit---
Thread 3 (Thread 0x3ffb0de0bf0 (LWP 52773)):
#0 0x000003ffafcf66e6 in ppoll () from /lib64/libc.so.6
#1 0x000002aa0920928e in ppoll (__ss=0x0, __timeout=0x0,
__nfds=<optimized out>, __fds=<optimized out>) at
/usr/include/bits/poll2.h:77
#2 qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=-1)
at qemu-timer.c:313
#3 0x000002aa09208b02 in os_host_main_loop_wait (timeout=-1) at
main-loop.c:251
#4 main_loop_wait (nonblocking=<optimized out>) at main-loop.c:505
#5 0x000002aa0907aade in main_loop () at vl.c:1933
#6 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized
out>) at vl.c:4646
Thread 2 (Thread 0x3ff5eeff910 (LWP 52821)):
#0 0x000003ffafcf819a in ioctl () from /lib64/libc.so.6
#1 0x000002aa090c6f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa09ddd750,
type=type@entry=44672) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
#2 0x000002aa090c701e in kvm_cpu_exec (cpu=0x2aa09ddd750) at
/usr/src/debug/qemu-2.5.50/kvm-all.c:1834
#3 0x000002aa090b3cd6 in qemu_kvm_cpu_thread_fn (arg=0x2aa09ddd750) at
/usr/src/debug/qemu-2.5.50/cpus.c:1056
#4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
#5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
Thread 1 (Thread 0x3ffad67f910 (LWP 52815)):
#0 blk_aio_read_entry (opaque=0x0) at block/block-backend.c:922
#1 0x000002aa092e5f6e in coroutine_trampoline (i0=<optimized out>,
i1=1342188224) at util/coroutine-ucontext.c:78
#2 0x000003ffafc5150a in __makecontext_ret () from /lib64/libc.so.6
> Thanks,
>
> Paolo
>
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
2016-03-29 9:14 ` tu bo
@ 2016-03-29 11:45 ` Cornelia Huck
2016-03-29 13:50 ` Paolo Bonzini
2016-03-29 11:54 ` Christian Borntraeger
1 sibling, 1 reply; 23+ messages in thread
From: Cornelia Huck @ 2016-03-29 11:45 UTC (permalink / raw)
To: tu bo; +Cc: famz, mst, qemu-devel, borntraeger, stefanha, Paolo Bonzini
On Tue, 29 Mar 2016 17:14:21 +0800
tu bo <tubo@linux.vnet.ibm.com> wrote:
> Hi Paolo:
>
> On 03/29/2016 02:11 AM, Paolo Bonzini wrote:
> > On 28/03/2016 05:55, TU BO wrote:
> >> Hi Cornelia:
> >>
> >> I got two crash with qemu master + "[PATCH 0/6] virtio: refactor host
> >> notifiers",
> >
> > Hi Tu Bo,
> >
> > please always include the assertion patch at
> > https://lists.gnu.org/archive/html/qemu-block/2016-03/msg00546.html in
> > your tests. Can you include the backtrace from all threads with that patch?
> >
> thanks for your reminder about the assertion patch. Here is the
> backtrace with qemu master + assertion patch + "[PATCH 0/6] virtio:
> refactor host notifiers",
FWIW, I've been running this in a reboot loop for the last 2 1/2 hours.
Could you perhaps share your command line?
>
> I got two crashes,
>
> 1. For 1st crash,
> (gdb) thread apply all bt
This one looks a lot like the crashes before the rework, which I don't
understand...
>
> Thread 8 (Thread 0x3ff8daf1910 (LWP 52859)):
> #0 0x000003ff9718ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
> #1 0x000003ff9718ed76 in sem_timedwait () from /lib64/libpthread.so.0
> #2 0x000002aa2d755868 in qemu_sem_timedwait (sem=0x3ff88000fa8,
> ms=<optimized out>) at util/qemu-thread-posix.c:245
> #3 0x000002aa2d6803e4 in worker_thread (opaque=0x3ff88000f40) at
> thread-pool.c:92
> #4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 7 (Thread 0x3ff8e679910 (LWP 52856)):
> #0 0x000003ff9718ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
> #1 0x000003ff9718ed76 in sem_timedwait () from /lib64/libpthread.so.0
> #2 0x000002aa2d755868 in qemu_sem_timedwait (sem=0x2aa2e1fbfa8,
> ms=<optimized out>) at util/qemu-thread-posix.c:245
> #3 0x000002aa2d6803e4 in worker_thread (opaque=0x2aa2e1fbf40) at
> thread-pool.c:92
> #4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 6 (Thread 0x3ff9497f910 (LWP 52850)):
> #0 0x000003ff9718c50e in pthread_cond_wait@@GLIBC_2.3.2 () from
> /lib64/libpthread.so.0
> #1 0x000003ff96d19792 in g_cond_wait () from /lib64/libglib-2.0.so.0
> #2 0x000002aa2d7165d2 in wait_for_trace_records_available () at
> trace/simple.c:147
> ---Type <return> to continue, or q <return> to quit---
> #3 writeout_thread (opaque=<optimized out>) at trace/simple.c:165
> #4 0x000003ff96cfa44c in g_thread_proxy () from /lib64/libglib-2.0.so.0
> #5 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #6 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 5 (Thread 0x3ff8efff910 (LWP 52855)):
> #0 0x000003ff967f819a in ioctl () from /lib64/libc.so.6
> #1 0x000002aa2d546f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa2e239030,
> type=type@entry=44672)
> at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
> #2 0x000002aa2d54701e in kvm_cpu_exec (cpu=0x2aa2e239030) at
> /usr/src/debug/qemu-2.5.50/kvm-all.c:1834
> #3 0x000002aa2d533cd6 in qemu_kvm_cpu_thread_fn (arg=<optimized out>)
> at /usr/src/debug/qemu-2.5.50/cpus.c:1056
> #4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 4 (Thread 0x3ff951ff910 (LWP 52849)):
> #0 0x000003ff967fcf56 in syscall () from /lib64/libc.so.6
> #1 0x000002aa2d755a36 in futex_wait (val=<optimized out>, ev=<optimized
> out>) at util/qemu-thread-posix.c:292
> #2 qemu_event_wait (ev=0x2aa2ddb5914 <rcu_call_ready_event>) at
> util/qemu-thread-posix.c:399
> #3 0x000002aa2d765002 in call_rcu_thread (opaque=<optimized out>) at
> util/rcu.c:250
> #4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
> ---Type <return> to continue, or q <return> to quit---
>
> Thread 3 (Thread 0x3ff978e0bf0 (LWP 52845)):
> #0 0x000003ff967f66e6 in ppoll () from /lib64/libc.so.6
> #1 0x000002aa2d68928e in ppoll (__ss=0x0, __timeout=0x0,
> __nfds=<optimized out>, __fds=<optimized out>) at
> /usr/include/bits/poll2.h:77
> #2 qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=-1)
> at qemu-timer.c:313
> #3 0x000002aa2d688b02 in os_host_main_loop_wait (timeout=-1) at
> main-loop.c:251
> #4 main_loop_wait (nonblocking=<optimized out>) at main-loop.c:505
> #5 0x000002aa2d4faade in main_loop () at vl.c:1933
> #6 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized
> out>) at vl.c:4646
>
> Thread 2 (Thread 0x3ff8ffff910 (LWP 52851)):
> #0 0x000003ff967f66e6 in ppoll () from /lib64/libc.so.6
> #1 0x000002aa2d68928e in ppoll (__ss=0x0, __timeout=0x0,
> __nfds=<optimized out>, __fds=<optimized out>) at
> /usr/include/bits/poll2.h:77
> #2 qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=-1)
> at qemu-timer.c:313
> #3 0x000002aa2d68a788 in aio_poll (ctx=0x2aa2de77e00,
> blocking=<optimized out>) at aio-posix.c:453
> #4 0x000002aa2d5b909c in iothread_run (opaque=0x2aa2de77220) at
> iothread.c:46
> #5 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #6 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 1 (Thread 0x3ff8f7ff910 (LWP 52854)):
> #0 0x000003ff9673b650 in raise () from /lib64/libc.so.6
> ---Type <return> to continue, or q <return> to quit---
> #1 0x000003ff9673ced8 in abort () from /lib64/libc.so.6
> #2 0x000003ff96733666 in __assert_fail_base () from /lib64/libc.so.6
> #3 0x000003ff967336f4 in __assert_fail () from /lib64/libc.so.6
> #4 0x000002aa2d562608 in virtio_blk_handle_output (vdev=<optimized
> out>, vq=<optimized out>)
> at /usr/src/debug/qemu-2.5.50/hw/block/virtio-blk.c:595
I don't see how we could get here from the cpu thread anymore... Did
you see any failure messages for setting host notifiers, btw?
Otherwise, it might be worthwhile adding some instrumentation for
setting up the host notifiers, so we can trace when the handler switch
is actually done.
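
(For illustration only, not part of the posted patches: a minimal sketch of
what such instrumentation could look like, assuming one simply logs every
switch from the bus-level helper introduced by this series. The helper name
and message format below are assumptions.)

#include "qemu/osdep.h"
#include "qemu/error-report.h"

/* Hypothetical trace point; calling this at the top of
 * set_host_notifier_internal() would make every handler switch
 * visible in the logs. */
static void trace_host_notifier_switch(int n, bool assign, bool set_handler)
{
    error_report("host notifier switch: queue %d assign %d set_handler %d",
                 n, assign, set_handler);
}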
> #5 0x000002aa2d587464 in virtio_ccw_hcall_notify (args=<optimized out>)
> at /usr/src/debug/qemu-2.5.50/hw/s390x/s390-virtio-ccw.c:64
> #6 0x000002aa2d58236c in s390_virtio_hypercall (env=0x2aa2e205660) at
> /usr/src/debug/qemu-2.5.50/hw/s390x/s390-virtio-hcall.c:35
> #7 0x000002aa2d5b0920 in handle_hypercall (run=<optimized out>,
> cpu=0x2aa2e1fd390) at /usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:1258
> #8 handle_diag (ipb=<optimized out>, run=0x3ff94080000,
> cpu=0x2aa2e1fd390) at /usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:1327
> #9 handle_instruction (run=0x3ff94080000, cpu=0x2aa2e1fd390) at
> /usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:1774
> #10 handle_intercept (cpu=0x2aa2e1fd390) at
> /usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:1817
> #11 kvm_arch_handle_exit (cs=<optimized out>, run=<optimized out>) at
> /usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:2003
> #12 0x000002aa2d547072 in kvm_cpu_exec (cpu=0x2aa2e1fd390) at
> /usr/src/debug/qemu-2.5.50/kvm-all.c:1921
> #13 0x000002aa2d533cd6 in qemu_kvm_cpu_thread_fn (arg=<optimized out>)
> at /usr/src/debug/qemu-2.5.50/cpus.c:1056
> #14 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #15 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>
>
>
> 2. For 2nd crash,
> (gdb) thread apply all bt
>
> Thread 10 (Thread 0x3ffacdff910 (LWP 52818)):
> #0 0x000003ffafcf819a in ioctl () from /lib64/libc.so.6
> #1 0x000002aa090c6f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa09d7d390,
> type=type@entry=44672) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
> #2 0x000002aa090c701e in kvm_cpu_exec (cpu=0x2aa09d7d390) at
> /usr/src/debug/qemu-2.5.50/kvm-all.c:1834
> #3 0x000002aa090b3cd6 in qemu_kvm_cpu_thread_fn (arg=0x2aa09d7d390) at
> /usr/src/debug/qemu-2.5.50/cpus.c:1056
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 9 (Thread 0x3ff5feff910 (LWP 52819)):
> #0 0x000003ffafcf819a in ioctl () from /lib64/libc.so.6
> #1 0x000002aa090c6f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa09db91d0,
> type=type@entry=44672) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
> #2 0x000002aa090c701e in kvm_cpu_exec (cpu=0x2aa09db91d0) at
> /usr/src/debug/qemu-2.5.50/kvm-all.c:1834
> #3 0x000002aa090b3cd6 in qemu_kvm_cpu_thread_fn (arg=0x2aa09db91d0) at
> /usr/src/debug/qemu-2.5.50/cpus.c:1056
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 8 (Thread 0x3ff5e6ff910 (LWP 52822)):
> #0 0x000003ffb068ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
> #1 0x000003ffb068ed76 in sem_timedwait () from /lib64/libpthread.so.0
> #2 0x000002aa092d5868 in qemu_sem_timedwait (sem=0x2aa09dfc288,
> ms=<optimized out>) at util/qemu-thread-posix.c:245
> #3 0x000002aa092003e4 in worker_thread (opaque=0x2aa09dfc220) at
> thread-pool.c:92
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 7 (Thread 0x3ffade7f910 (LWP 52814)):
> #0 0x000003ffb068c50e in pthread_cond_wait@@GLIBC_2.3.2 () from
> /lib64/libpthread.so.0
> #1 0x000003ffb0219792 in g_cond_wait () from /lib64/libglib-2.0.so.0
> #2 0x000002aa092965d2 in wait_for_trace_records_available () at
> trace/simple.c:147
> #3 writeout_thread (opaque=<optimized out>) at trace/simple.c:165
> #4 0x000003ffb01fa44c in g_thread_proxy () from /lib64/libglib-2.0.so.0
> #5 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #6 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 6 (Thread 0x3ff5dcfd910 (LWP 52824)):
> #0 0x000003ffb068ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
> #1 0x000003ffb068ed76 in sem_timedwait () from /lib64/libpthread.so.0
> #2 0x000002aa092d5868 in qemu_sem_timedwait (sem=0x3ffa0000fa8,
> ms=<optimized out>) at util/qemu-thread-posix.c:245
> #3 0x000002aa092003e4 in worker_thread (opaque=0x3ffa0000f40) at
> thread-pool.c:92
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 5 (Thread 0x3ffae6ff910 (LWP 52813)):
> #0 0x000003ffafcfcf56 in syscall () from /lib64/libc.so.6
> #1 0x000002aa092d5a36 in futex_wait (val=<optimized out>, ev=<optimized
> out>) at util/qemu-thread-posix.c:292
> #2 qemu_event_wait (ev=0x2aa09935914 <rcu_call_ready_event>) at
> util/qemu-thread-posix.c:399
> #3 0x000002aa092e5002 in call_rcu_thread (opaque=<optimized out>) at
> util/rcu.c:250
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 4 (Thread 0x3ff5f6ff910 (LWP 52820)):
> #0 0x000003ffafcf819a in ioctl () from /lib64/libc.so.6
> #1 0x000002aa090c6f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa09dcb490,
> type=type@entry=44672) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
> #2 0x000002aa090c701e in kvm_cpu_exec (cpu=0x2aa09dcb490) at
> /usr/src/debug/qemu-2.5.50/kvm-all.c:1834
> #3 0x000002aa090b3cd6 in qemu_kvm_cpu_thread_fn (arg=0x2aa09dcb490) at
> /usr/src/debug/qemu-2.5.50/cpus.c:1056
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
> ---Type <return> to continue, or q <return> to quit---
>
> Thread 3 (Thread 0x3ffb0de0bf0 (LWP 52773)):
> #0 0x000003ffafcf66e6 in ppoll () from /lib64/libc.so.6
> #1 0x000002aa0920928e in ppoll (__ss=0x0, __timeout=0x0,
> __nfds=<optimized out>, __fds=<optimized out>) at
> /usr/include/bits/poll2.h:77
> #2 qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=-1)
> at qemu-timer.c:313
> #3 0x000002aa09208b02 in os_host_main_loop_wait (timeout=-1) at
> main-loop.c:251
> #4 main_loop_wait (nonblocking=<optimized out>) at main-loop.c:505
> #5 0x000002aa0907aade in main_loop () at vl.c:1933
> #6 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized
> out>) at vl.c:4646
>
> Thread 2 (Thread 0x3ff5eeff910 (LWP 52821)):
> #0 0x000003ffafcf819a in ioctl () from /lib64/libc.so.6
> #1 0x000002aa090c6f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa09ddd750,
> type=type@entry=44672) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
> #2 0x000002aa090c701e in kvm_cpu_exec (cpu=0x2aa09ddd750) at
> /usr/src/debug/qemu-2.5.50/kvm-all.c:1834
> #3 0x000002aa090b3cd6 in qemu_kvm_cpu_thread_fn (arg=0x2aa09ddd750) at
> /usr/src/debug/qemu-2.5.50/cpus.c:1056
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 1 (Thread 0x3ffad67f910 (LWP 52815)):
> #0 blk_aio_read_entry (opaque=0x0) at block/block-backend.c:922
> #1 0x000002aa092e5f6e in coroutine_trampoline (i0=<optimized out>,
> i1=1342188224) at util/coroutine-ucontext.c:78
> #2 0x000003ffafc5150a in __makecontext_ret () from /lib64/libc.so.6
I don't see the crash in here?
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
2016-03-29 11:45 ` Cornelia Huck
@ 2016-03-29 13:50 ` Paolo Bonzini
2016-03-29 16:27 ` Christian Borntraeger
0 siblings, 1 reply; 23+ messages in thread
From: Paolo Bonzini @ 2016-03-29 13:50 UTC (permalink / raw)
To: Cornelia Huck, tu bo; +Cc: borntraeger, famz, qemu-devel, stefanha, mst
On 29/03/2016 13:45, Cornelia Huck wrote:
> > > Hi Tu Bo,
> > >
> > > please always include the assertion patch at
> > > https://lists.gnu.org/archive/html/qemu-block/2016-03/msg00546.html in
> > > your tests. Can you include the backtrace from all threads with that patch?
> > >
> > thanks for your reminder about the assertion patch. Here is the
> > backtrace with qemu master + assertion patch + "[PATCH 0/6] virtio:
> > refactor host notifiers",
>
> FWIW, I've been running this in a reboot loop for the last 2 1/2 hours.
> Could you perhaps share your command line?
From code inspection, the following is also necessary or at least a
good idea:
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 6fb29e3..7fa8477 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -258,7 +258,7 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
aio_context_acquire(s->ctx);
/* Stop notifications for new requests from guest */
- virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, false, false);
+ virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, true, false);
/* Drain and switch bs back to the QEMU main loop */
blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
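
For illustration, here is roughly how the tail of virtio_blk_data_plane_stop()
reads with the change applied; the comments spell out the intended semantics,
which are inferred from the cover letter's description of the deassigned
ioeventfd window and are an assumption rather than a statement about the
actual implementation:

    aio_context_acquire(s->ctx);

    /* Stop notifications for new requests from guest.  Passing true rather
     * than false here presumably keeps the host notifier wired up while the
     * dataplane-specific handling is torn down, so there is no window in
     * which the queue is left without any handler and the vcpu thread can
     * race the iothread. */
    virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, true, false);

    /* Drain and switch bs back to the QEMU main loop */
    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());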
^ permalink raw reply related [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
2016-03-29 13:50 ` Paolo Bonzini
@ 2016-03-29 16:27 ` Christian Borntraeger
2016-03-31 2:37 ` tu bo
2016-03-31 5:47 ` tu bo
0 siblings, 2 replies; 23+ messages in thread
From: Christian Borntraeger @ 2016-03-29 16:27 UTC (permalink / raw)
To: Paolo Bonzini, Cornelia Huck, tu bo; +Cc: famz, qemu-devel, stefanha, mst
On 03/29/2016 03:50 PM, Paolo Bonzini wrote:
>
>
> On 29/03/2016 13:45, Cornelia Huck wrote:
>>>> Hi Tu Bo,
>>>>
>>>> please always include the assertion patch at
>>>> https://lists.gnu.org/archive/html/qemu-block/2016-03/msg00546.html in
>>>> your tests. Can you include the backtrace from all threads with that patch?
>>>>
>>> thanks for your reminder about the assertion patch. Here is the
>>> backtrace with qemu master + assertion patch + "[PATCH 0/6] virtio:
>>> refactor host notifiers",
>>
>> FWIW, I've been running this in a reboot loop for the last 2 1/2 hours.
>> Could you perhaps share your command line?
>
> From code inspection, the following is also necessary or at least a
> good idea:
>
> diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
> index 6fb29e3..7fa8477 100644
> --- a/hw/block/dataplane/virtio-blk.c
> +++ b/hw/block/dataplane/virtio-blk.c
> @@ -258,7 +258,7 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
> aio_context_acquire(s->ctx);
>
> /* Stop notifications for new requests from guest */
> - virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, false, false);
> + virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, true, false);
>
> /* Drain and switch bs back to the QEMU main loop */
> blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
>
Right. Tu Bo, you seem to have the best testcase for this.
Does your setup run fine with this on top?
Christian
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
2016-03-29 16:27 ` Christian Borntraeger
@ 2016-03-31 2:37 ` tu bo
2016-03-31 5:47 ` tu bo
1 sibling, 0 replies; 23+ messages in thread
From: tu bo @ 2016-03-31 2:37 UTC (permalink / raw)
To: Christian Borntraeger, Paolo Bonzini, Cornelia Huck
Cc: famz, qemu-devel, stefanha, mst
Hi Christian:
On 03/30/2016 12:27 AM, Christian Borntraeger wrote:
> On 03/29/2016 03:50 PM, Paolo Bonzini wrote:
>>
>>
>> On 29/03/2016 13:45, Cornelia Huck wrote:
>>>>> Hi Tu Bo,
>>>>>
>>>>> please always include the assertion patch at
>>>>> https://lists.gnu.org/archive/html/qemu-block/2016-03/msg00546.html in
>>>>> your tests. Can you include the backtrace from all threads with that patch?
>>>>>
>>>> thanks for your reminder about the assertion patch. Here is the
>>>> backtrace with qemu master + assertion patch + "[PATCH 0/6] virtio:
>>>> refactor host notifiers",
>>>
>>> FWIW, I've been running this in a reboot loop for the last 2 1/2 hours.
>>> Could you perhaps share your command line?
>>
>> From code inspection, the following is also necessary or at least a
>> good idea:
>>
>> diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
>> index 6fb29e3..7fa8477 100644
>> --- a/hw/block/dataplane/virtio-blk.c
>> +++ b/hw/block/dataplane/virtio-blk.c
>> @@ -258,7 +258,7 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
>> aio_context_acquire(s->ctx);
>>
>> /* Stop notifications for new requests from guest */
>> - virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, false, false);
>> + virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, true, false);
>>
>> /* Drain and switch bs back to the QEMU main loop */
>> blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
>>
>
> Right. Tu Bo, you seem to have the best testcase for this.
> Does your setup runs fine with this on top?
My test needs at least four scsi disks, which is broken right now because of
the s38 firmware update. I'll test it when s38 scsi is ready. thx
>
> CHristian
>
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
2016-03-29 16:27 ` Christian Borntraeger
2016-03-31 2:37 ` tu bo
@ 2016-03-31 5:47 ` tu bo
1 sibling, 0 replies; 23+ messages in thread
From: tu bo @ 2016-03-31 5:47 UTC (permalink / raw)
To: Christian Borntraeger, Paolo Bonzini, Cornelia Huck
Cc: famz, qemu-devel, stefanha, mst
Hi Christian:
I got the same crash with qemu master + assertion patch + "[PATCH 0/6]
virtio: refactor host notifiers" + Paolo's fix:
(gdb) bt
#0 blk_aio_read_entry (opaque=0x0) at block/block-backend.c:916
#1 0x000002aa2e8e88fe in coroutine_trampoline (i0=<optimized out>,
i1=-1677703696) at util/coroutine-ucontext.c:78
#2 0x000003ffa85d150a in __makecontext_ret () from /lib64/libc.so.6
On 03/30/2016 12:27 AM, Christian Borntraeger wrote:
> On 03/29/2016 03:50 PM, Paolo Bonzini wrote:
>>
>>
>> On 29/03/2016 13:45, Cornelia Huck wrote:
>>>>> Hi Tu Bo,
>>>>>
>>>>> please always include the assertion patch at
>>>>> https://lists.gnu.org/archive/html/qemu-block/2016-03/msg00546.html in
>>>>> your tests. Can you include the backtrace from all threads with that patch?
>>>>>
>>>> thanks for your reminder about the assertion patch. Here is the
>>>> backtrace with qemu master + assertion patch + "[PATCH 0/6] virtio:
>>>> refactor host notifiers",
>>>
>>> FWIW, I've been running this in a reboot loop for the last 2 1/2 hours.
>>> Could you perhaps share your command line?
>>
>> From code inspection, the following is also necessary or at least a
>> good idea:
>>
>> diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
>> index 6fb29e3..7fa8477 100644
>> --- a/hw/block/dataplane/virtio-blk.c
>> +++ b/hw/block/dataplane/virtio-blk.c
>> @@ -258,7 +258,7 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
>> aio_context_acquire(s->ctx);
>>
>> /* Stop notifications for new requests from guest */
>> - virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, false, false);
>> + virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, true, false);
>>
>> /* Drain and switch bs back to the QEMU main loop */
>> blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
>>
>
> Right. Tu Bo, you seem to have the best testcase for this.
> Does your setup runs fine with this on top?
>
> CHristian
>
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
2016-03-29 9:14 ` tu bo
2016-03-29 11:45 ` Cornelia Huck
@ 2016-03-29 11:54 ` Christian Borntraeger
2016-03-31 2:47 ` tu bo
1 sibling, 1 reply; 23+ messages in thread
From: Christian Borntraeger @ 2016-03-29 11:54 UTC (permalink / raw)
To: tu bo, Paolo Bonzini, Cornelia Huck, qemu-devel; +Cc: famz, stefanha, mst
On 03/29/2016 11:14 AM, tu bo wrote:
> Hi Paolo:
>
> On 03/29/2016 02:11 AM, Paolo Bonzini wrote:
>> On 28/03/2016 05:55, TU BO wrote:
>>> Hi Cornelia:
>>>
>>> I got two crash with qemu master + "[PATCH 0/6] virtio: refactor host
>>> notifiers",
>>
>> Hi Tu Bo,
>>
>> please always include the assertion patch at
>> https://lists.gnu.org/archive/html/qemu-block/2016-03/msg00546.html in
>> your tests. Can you include the backtrace from all threads with that patch?
>>
> thanks for your reminder about the assertion patch. Here is the backtrace with qemu master + assertion patch + "[PATCH 0/6] virtio: refactor host notifiers",
>
> I got two crashes,
>
> 1. For 1st crash,
> (gdb) thread apply all bt
>
> Thread 8 (Thread 0x3ff8daf1910 (LWP 52859)):
> #0 0x000003ff9718ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
> #1 0x000003ff9718ed76 in sem_timedwait () from /lib64/libpthread.so.0
> #2 0x000002aa2d755868 in qemu_sem_timedwait (sem=0x3ff88000fa8, ms=<optimized out>) at util/qemu-thread-posix.c:245
> #3 0x000002aa2d6803e4 in worker_thread (opaque=0x3ff88000f40) at thread-pool.c:92
> #4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 7 (Thread 0x3ff8e679910 (LWP 52856)):
> #0 0x000003ff9718ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
> #1 0x000003ff9718ed76 in sem_timedwait () from /lib64/libpthread.so.0
> #2 0x000002aa2d755868 in qemu_sem_timedwait (sem=0x2aa2e1fbfa8, ms=<optimized out>) at util/qemu-thread-posix.c:245
> #3 0x000002aa2d6803e4 in worker_thread (opaque=0x2aa2e1fbf40) at thread-pool.c:92
> #4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 6 (Thread 0x3ff9497f910 (LWP 52850)):
> #0 0x000003ff9718c50e in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
> #1 0x000003ff96d19792 in g_cond_wait () from /lib64/libglib-2.0.so.0
> #2 0x000002aa2d7165d2 in wait_for_trace_records_available () at trace/simple.c:147
> ---Type <return> to continue, or q <return> to quit---
> #3 writeout_thread (opaque=<optimized out>) at trace/simple.c:165
> #4 0x000003ff96cfa44c in g_thread_proxy () from /lib64/libglib-2.0.so.0
> #5 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #6 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 5 (Thread 0x3ff8efff910 (LWP 52855)):
> #0 0x000003ff967f819a in ioctl () from /lib64/libc.so.6
> #1 0x000002aa2d546f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa2e239030, type=type@entry=44672)
> at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
> #2 0x000002aa2d54701e in kvm_cpu_exec (cpu=0x2aa2e239030) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1834
> #3 0x000002aa2d533cd6 in qemu_kvm_cpu_thread_fn (arg=<optimized out>) at /usr/src/debug/qemu-2.5.50/cpus.c:1056
> #4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 4 (Thread 0x3ff951ff910 (LWP 52849)):
> #0 0x000003ff967fcf56 in syscall () from /lib64/libc.so.6
> #1 0x000002aa2d755a36 in futex_wait (val=<optimized out>, ev=<optimized out>) at util/qemu-thread-posix.c:292
> #2 qemu_event_wait (ev=0x2aa2ddb5914 <rcu_call_ready_event>) at util/qemu-thread-posix.c:399
> #3 0x000002aa2d765002 in call_rcu_thread (opaque=<optimized out>) at util/rcu.c:250
> #4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
> ---Type <return> to continue, or q <return> to quit---
>
> Thread 3 (Thread 0x3ff978e0bf0 (LWP 52845)):
> #0 0x000003ff967f66e6 in ppoll () from /lib64/libc.so.6
> #1 0x000002aa2d68928e in ppoll (__ss=0x0, __timeout=0x0, __nfds=<optimized out>, __fds=<optimized out>) at /usr/include/bits/poll2.h:77
> #2 qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=-1) at qemu-timer.c:313
> #3 0x000002aa2d688b02 in os_host_main_loop_wait (timeout=-1) at main-loop.c:251
> #4 main_loop_wait (nonblocking=<optimized out>) at main-loop.c:505
> #5 0x000002aa2d4faade in main_loop () at vl.c:1933
> #6 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at vl.c:4646
>
> Thread 2 (Thread 0x3ff8ffff910 (LWP 52851)):
> #0 0x000003ff967f66e6 in ppoll () from /lib64/libc.so.6
> #1 0x000002aa2d68928e in ppoll (__ss=0x0, __timeout=0x0, __nfds=<optimized out>, __fds=<optimized out>) at /usr/include/bits/poll2.h:77
> #2 qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=-1) at qemu-timer.c:313
> #3 0x000002aa2d68a788 in aio_poll (ctx=0x2aa2de77e00, blocking=<optimized out>) at aio-posix.c:453
> #4 0x000002aa2d5b909c in iothread_run (opaque=0x2aa2de77220) at iothread.c:46
> #5 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #6 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 1 (Thread 0x3ff8f7ff910 (LWP 52854)):
> #0 0x000003ff9673b650 in raise () from /lib64/libc.so.6
> ---Type <return> to continue, or q <return> to quit---
> #1 0x000003ff9673ced8 in abort () from /lib64/libc.so.6
> #2 0x000003ff96733666 in __assert_fail_base () from /lib64/libc.so.6
> #3 0x000003ff967336f4 in __assert_fail () from /lib64/libc.so.6
> #4 0x000002aa2d562608 in virtio_blk_handle_output (vdev=<optimized out>, vq=<optimized out>)
> at /usr/src/debug/qemu-2.5.50/hw/block/virtio-blk.c:595
Hmmm, are you sure that you used the newly compiled qemu and not the one from our internal daily rpms
that we have?
> #5 0x000002aa2d587464 in virtio_ccw_hcall_notify (args=<optimized out>) at /usr/src/debug/qemu-2.5.50/hw/s390x/s390-virtio-ccw.c:64
> #6 0x000002aa2d58236c in s390_virtio_hypercall (env=0x2aa2e205660) at /usr/src/debug/qemu-2.5.50/hw/s390x/s390-virtio-hcall.c:35
> #7 0x000002aa2d5b0920 in handle_hypercall (run=<optimized out>, cpu=0x2aa2e1fd390) at /usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:1258
> #8 handle_diag (ipb=<optimized out>, run=0x3ff94080000, cpu=0x2aa2e1fd390) at /usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:1327
> #9 handle_instruction (run=0x3ff94080000, cpu=0x2aa2e1fd390) at /usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:1774
> #10 handle_intercept (cpu=0x2aa2e1fd390) at /usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:1817
> #11 kvm_arch_handle_exit (cs=<optimized out>, run=<optimized out>) at /usr/src/debug/qemu-2.5.50/target-s390x/kvm.c:2003
> #12 0x000002aa2d547072 in kvm_cpu_exec (cpu=0x2aa2e1fd390) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1921
> #13 0x000002aa2d533cd6 in qemu_kvm_cpu_thread_fn (arg=<optimized out>) at /usr/src/debug/qemu-2.5.50/cpus.c:1056
> #14 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
> #15 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>
>
>
> 2. For 2nd crash,
> (gdb) thread apply all bt
>
> Thread 10 (Thread 0x3ffacdff910 (LWP 52818)):
> #0 0x000003ffafcf819a in ioctl () from /lib64/libc.so.6
> #1 0x000002aa090c6f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa09d7d390, type=type@entry=44672) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
> #2 0x000002aa090c701e in kvm_cpu_exec (cpu=0x2aa09d7d390) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1834
> #3 0x000002aa090b3cd6 in qemu_kvm_cpu_thread_fn (arg=0x2aa09d7d390) at /usr/src/debug/qemu-2.5.50/cpus.c:1056
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 9 (Thread 0x3ff5feff910 (LWP 52819)):
> #0 0x000003ffafcf819a in ioctl () from /lib64/libc.so.6
> #1 0x000002aa090c6f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa09db91d0, type=type@entry=44672) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
> #2 0x000002aa090c701e in kvm_cpu_exec (cpu=0x2aa09db91d0) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1834
> #3 0x000002aa090b3cd6 in qemu_kvm_cpu_thread_fn (arg=0x2aa09db91d0) at /usr/src/debug/qemu-2.5.50/cpus.c:1056
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 8 (Thread 0x3ff5e6ff910 (LWP 52822)):
> #0 0x000003ffb068ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
> #1 0x000003ffb068ed76 in sem_timedwait () from /lib64/libpthread.so.0
> #2 0x000002aa092d5868 in qemu_sem_timedwait (sem=0x2aa09dfc288, ms=<optimized out>) at util/qemu-thread-posix.c:245
> #3 0x000002aa092003e4 in worker_thread (opaque=0x2aa09dfc220) at thread-pool.c:92
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 7 (Thread 0x3ffade7f910 (LWP 52814)):
> #0 0x000003ffb068c50e in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
> #1 0x000003ffb0219792 in g_cond_wait () from /lib64/libglib-2.0.so.0
> #2 0x000002aa092965d2 in wait_for_trace_records_available () at trace/simple.c:147
> #3 writeout_thread (opaque=<optimized out>) at trace/simple.c:165
> #4 0x000003ffb01fa44c in g_thread_proxy () from /lib64/libglib-2.0.so.0
> #5 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #6 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 6 (Thread 0x3ff5dcfd910 (LWP 52824)):
> #0 0x000003ffb068ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
> #1 0x000003ffb068ed76 in sem_timedwait () from /lib64/libpthread.so.0
> #2 0x000002aa092d5868 in qemu_sem_timedwait (sem=0x3ffa0000fa8, ms=<optimized out>) at util/qemu-thread-posix.c:245
> #3 0x000002aa092003e4 in worker_thread (opaque=0x3ffa0000f40) at thread-pool.c:92
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 5 (Thread 0x3ffae6ff910 (LWP 52813)):
> #0 0x000003ffafcfcf56 in syscall () from /lib64/libc.so.6
> #1 0x000002aa092d5a36 in futex_wait (val=<optimized out>, ev=<optimized out>) at util/qemu-thread-posix.c:292
> #2 qemu_event_wait (ev=0x2aa09935914 <rcu_call_ready_event>) at util/qemu-thread-posix.c:399
> #3 0x000002aa092e5002 in call_rcu_thread (opaque=<optimized out>) at util/rcu.c:250
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 4 (Thread 0x3ff5f6ff910 (LWP 52820)):
> #0 0x000003ffafcf819a in ioctl () from /lib64/libc.so.6
> #1 0x000002aa090c6f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa09dcb490, type=type@entry=44672) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
> #2 0x000002aa090c701e in kvm_cpu_exec (cpu=0x2aa09dcb490) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1834
> #3 0x000002aa090b3cd6 in qemu_kvm_cpu_thread_fn (arg=0x2aa09dcb490) at /usr/src/debug/qemu-2.5.50/cpus.c:1056
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
> ---Type <return> to continue, or q <return> to quit---
>
> Thread 3 (Thread 0x3ffb0de0bf0 (LWP 52773)):
> #0 0x000003ffafcf66e6 in ppoll () from /lib64/libc.so.6
> #1 0x000002aa0920928e in ppoll (__ss=0x0, __timeout=0x0, __nfds=<optimized out>, __fds=<optimized out>) at /usr/include/bits/poll2.h:77
> #2 qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=-1) at qemu-timer.c:313
> #3 0x000002aa09208b02 in os_host_main_loop_wait (timeout=-1) at main-loop.c:251
> #4 main_loop_wait (nonblocking=<optimized out>) at main-loop.c:505
> #5 0x000002aa0907aade in main_loop () at vl.c:1933
> #6 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at vl.c:4646
>
> Thread 2 (Thread 0x3ff5eeff910 (LWP 52821)):
> #0 0x000003ffafcf819a in ioctl () from /lib64/libc.so.6
> #1 0x000002aa090c6f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa09ddd750, type=type@entry=44672) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
> #2 0x000002aa090c701e in kvm_cpu_exec (cpu=0x2aa09ddd750) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1834
> #3 0x000002aa090b3cd6 in qemu_kvm_cpu_thread_fn (arg=0x2aa09ddd750) at /usr/src/debug/qemu-2.5.50/cpus.c:1056
> #4 0x000003ffb06884c6 in start_thread () from /lib64/libpthread.so.0
> #5 0x000003ffafd02ec2 in thread_start () from /lib64/libc.so.6
>
> Thread 1 (Thread 0x3ffad67f910 (LWP 52815)):
> #0 blk_aio_read_entry (opaque=0x0) at block/block-backend.c:922
> #1 0x000002aa092e5f6e in coroutine_trampoline (i0=<optimized out>, i1=1342188224) at util/coroutine-ucontext.c:78
> #2 0x000003ffafc5150a in __makecontext_ret () from /lib64/libc.so.6
>
>
>
>
>> Thanks,
>>
>> Paolo
>>
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
2016-03-29 11:54 ` Christian Borntraeger
@ 2016-03-31 2:47 ` tu bo
0 siblings, 0 replies; 23+ messages in thread
From: tu bo @ 2016-03-31 2:47 UTC (permalink / raw)
To: Christian Borntraeger, Paolo Bonzini, Cornelia Huck, qemu-devel
Cc: famz, stefanha, mst
Hi Christian:
On 03/29/2016 07:54 PM, Christian Borntraeger wrote:
> On 03/29/2016 11:14 AM, tu bo wrote:
>> Hi Paolo:
>>
>> On 03/29/2016 02:11 AM, Paolo Bonzini wrote:
>>> On 28/03/2016 05:55, TU BO wrote:
>>>> Hi Cornelia:
>>>>
>>>> I got two crash with qemu master + "[PATCH 0/6] virtio: refactor host
>>>> notifiers",
>>>
>>> Hi Tu Bo,
>>>
>>> please always include the assertion patch at
>>> https://lists.gnu.org/archive/html/qemu-block/2016-03/msg00546.html in
>>> your tests. Can you include the backtrace from all threads with that patch?
>>>
>> thanks for your reminder about the assertion patch. Here is the backtrace with qemu master + assertion patch + "[PATCH 0/6] virtio: refactor host notifiers",
>>
>> I got two crashes,
>>
>> 1. For 1st crash,
>> (gdb) thread apply all bt
>>
>> Thread 8 (Thread 0x3ff8daf1910 (LWP 52859)):
>> #0 0x000003ff9718ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
>> #1 0x000003ff9718ed76 in sem_timedwait () from /lib64/libpthread.so.0
>> #2 0x000002aa2d755868 in qemu_sem_timedwait (sem=0x3ff88000fa8, ms=<optimized out>) at util/qemu-thread-posix.c:245
>> #3 0x000002aa2d6803e4 in worker_thread (opaque=0x3ff88000f40) at thread-pool.c:92
>> #4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
>> #5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>>
>> Thread 7 (Thread 0x3ff8e679910 (LWP 52856)):
>> #0 0x000003ff9718ec62 in do_futex_timed_wait () from /lib64/libpthread.so.0
>> #1 0x000003ff9718ed76 in sem_timedwait () from /lib64/libpthread.so.0
>> #2 0x000002aa2d755868 in qemu_sem_timedwait (sem=0x2aa2e1fbfa8, ms=<optimized out>) at util/qemu-thread-posix.c:245
>> #3 0x000002aa2d6803e4 in worker_thread (opaque=0x2aa2e1fbf40) at thread-pool.c:92
>> #4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
>> #5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>>
>> Thread 6 (Thread 0x3ff9497f910 (LWP 52850)):
>> #0 0x000003ff9718c50e in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
>> #1 0x000003ff96d19792 in g_cond_wait () from /lib64/libglib-2.0.so.0
>> #2 0x000002aa2d7165d2 in wait_for_trace_records_available () at trace/simple.c:147
>> ---Type <return> to continue, or q <return> to quit---
>> #3 writeout_thread (opaque=<optimized out>) at trace/simple.c:165
>> #4 0x000003ff96cfa44c in g_thread_proxy () from /lib64/libglib-2.0.so.0
>> #5 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
>> #6 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>>
>> Thread 5 (Thread 0x3ff8efff910 (LWP 52855)):
>> #0 0x000003ff967f819a in ioctl () from /lib64/libc.so.6
>> #1 0x000002aa2d546f3e in kvm_vcpu_ioctl (cpu=cpu@entry=0x2aa2e239030, type=type@entry=44672)
>> at /usr/src/debug/qemu-2.5.50/kvm-all.c:1984
>> #2 0x000002aa2d54701e in kvm_cpu_exec (cpu=0x2aa2e239030) at /usr/src/debug/qemu-2.5.50/kvm-all.c:1834
>> #3 0x000002aa2d533cd6 in qemu_kvm_cpu_thread_fn (arg=<optimized out>) at /usr/src/debug/qemu-2.5.50/cpus.c:1056
>> #4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
>> #5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>>
>> Thread 4 (Thread 0x3ff951ff910 (LWP 52849)):
>> #0 0x000003ff967fcf56 in syscall () from /lib64/libc.so.6
>> #1 0x000002aa2d755a36 in futex_wait (val=<optimized out>, ev=<optimized out>) at util/qemu-thread-posix.c:292
>> #2 qemu_event_wait (ev=0x2aa2ddb5914 <rcu_call_ready_event>) at util/qemu-thread-posix.c:399
>> #3 0x000002aa2d765002 in call_rcu_thread (opaque=<optimized out>) at util/rcu.c:250
>> #4 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
>> #5 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>> ---Type <return> to continue, or q <return> to quit---
>>
>> Thread 3 (Thread 0x3ff978e0bf0 (LWP 52845)):
>> #0 0x000003ff967f66e6 in ppoll () from /lib64/libc.so.6
>> #1 0x000002aa2d68928e in ppoll (__ss=0x0, __timeout=0x0, __nfds=<optimized out>, __fds=<optimized out>) at /usr/include/bits/poll2.h:77
>> #2 qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=-1) at qemu-timer.c:313
>> #3 0x000002aa2d688b02 in os_host_main_loop_wait (timeout=-1) at main-loop.c:251
>> #4 main_loop_wait (nonblocking=<optimized out>) at main-loop.c:505
>> #5 0x000002aa2d4faade in main_loop () at vl.c:1933
>> #6 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at vl.c:4646
>>
>> Thread 2 (Thread 0x3ff8ffff910 (LWP 52851)):
>> #0 0x000003ff967f66e6 in ppoll () from /lib64/libc.so.6
>> #1 0x000002aa2d68928e in ppoll (__ss=0x0, __timeout=0x0, __nfds=<optimized out>, __fds=<optimized out>) at /usr/include/bits/poll2.h:77
>> #2 qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=-1) at qemu-timer.c:313
>> #3 0x000002aa2d68a788 in aio_poll (ctx=0x2aa2de77e00, blocking=<optimized out>) at aio-posix.c:453
>> #4 0x000002aa2d5b909c in iothread_run (opaque=0x2aa2de77220) at iothread.c:46
>> #5 0x000003ff971884c6 in start_thread () from /lib64/libpthread.so.0
>> #6 0x000003ff96802ec2 in thread_start () from /lib64/libc.so.6
>>
>> Thread 1 (Thread 0x3ff8f7ff910 (LWP 52854)):
>> #0 0x000003ff9673b650 in raise () from /lib64/libc.so.6
>> ---Type <return> to continue, or q <return> to quit---
>> #1 0x000003ff9673ced8 in abort () from /lib64/libc.so.6
>> #2 0x000003ff96733666 in __assert_fail_base () from /lib64/libc.so.6
>> #3 0x000003ff967336f4 in __assert_fail () from /lib64/libc.so.6
>> #4 0x000002aa2d562608 in virtio_blk_handle_output (vdev=<optimized out>, vq=<optimized out>)
>> at /usr/src/debug/qemu-2.5.50/hw/block/virtio-blk.c:595
>
> Hmmm, are you sure that you used the newly compiled qemu and not the one from our internal daily rpms
> that we have?
>
Yes, I got the latest qemu master from tuxmaker, then compiled and
installed it on my box. thx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
2016-03-24 16:15 [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers Cornelia Huck
` (8 preceding siblings ...)
2016-03-28 3:55 ` TU BO
@ 2016-03-29 13:23 ` Christian Borntraeger
2016-03-29 13:38 ` Michael S. Tsirkin
9 siblings, 1 reply; 23+ messages in thread
From: Christian Borntraeger @ 2016-03-29 13:23 UTC (permalink / raw)
To: Cornelia Huck, qemu-devel; +Cc: tubo, pbonzini, famz, stefanha, mst
On 03/24/2016 05:15 PM, Cornelia Huck wrote:
> Here's the next version of my refactoring of the virtio host notifiers.
> This one actually survives a bit of testing for me (reboot loop).
>
> As this patchset fixes a latent bug exposed by the recent dataplane
> changes (we have a deassigned ioeventfd for a short period of time
> during dataplane start, which leads to the virtqueue handler being
> called in both the vcpu thread and the iothread simultaneously), I'd
> like to see this in 2.6.
>
> Changes from RFC:
> - Fixed some silly errors (checking for !disabled instead of disabled,
> virtio_ccw_stop_ioeventfd() calling virtio_bus_start_ioeventfd()).
> - Completely reworked set_host_notifier(): We only want to set/unset
> the actual handler function and don't want to do anything to the
> ioeventfd backing, so reduce the function to actually doing only
> that.
> - With the change above, we can lose the 'assign' parameter in
> virtio_bus_stop_ioeventfd() again.
> - Added more comments that hopefully make it clearer what is going on.
>
> I'd appreciate it if people could give it some testing; I'll be back
> to look at the fallout after Easter.
>
> Cornelia Huck (6):
> virtio-bus: common ioeventfd infrastructure
> virtio-bus: have callers tolerate new host notifier api
> virtio-ccw: convert to ioeventfd callbacks
> virtio-pci: convert to ioeventfd callbacks
> virtio-mmio: convert to ioeventfd callbacks
> virtio-bus: remove old set_host_notifier callback
>
> hw/block/dataplane/virtio-blk.c | 6 +-
> hw/s390x/virtio-ccw.c | 133 ++++++++++++++--------------------------
> hw/scsi/virtio-scsi-dataplane.c | 9 ++-
> hw/virtio/vhost.c | 13 ++--
> hw/virtio/virtio-bus.c | 132 +++++++++++++++++++++++++++++++++++++++
> hw/virtio/virtio-mmio.c | 128 +++++++++++++-------------------------
> hw/virtio/virtio-pci.c | 124 +++++++++++++------------------------
> include/hw/virtio/virtio-bus.h | 31 +++++++++-
> 8 files changed, 303 insertions(+), 273 deletions(-)
>
FWIW, I went back to the old F20 installation. Without your patch set qemu
crashes pretty soon; with your patch set it runs stable.
How intrusive would it be to provide the fix 3 times (once for each transport)
and do the refactoring after 2.6? If the result would be just as big, I think
this patch set is still the right thing to do for 2.6, unless MST has a
small and beautiful 2.6 fix.
Christian
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [PATCH 0/6] virtio: refactor host notifiers
2016-03-29 13:23 ` Christian Borntraeger
@ 2016-03-29 13:38 ` Michael S. Tsirkin
0 siblings, 0 replies; 23+ messages in thread
From: Michael S. Tsirkin @ 2016-03-29 13:38 UTC (permalink / raw)
To: Christian Borntraeger
Cc: famz, qemu-devel, tubo, stefanha, Cornelia Huck, pbonzini
On Tue, Mar 29, 2016 at 03:23:57PM +0200, Christian Borntraeger wrote:
> On 03/24/2016 05:15 PM, Cornelia Huck wrote:
> > Here's the next version of my refactoring of the virtio host notifiers.
> > This one actually survives a bit of testing for me (reboot loop).
> >
> > As this patchset fixes a latent bug exposed by the recent dataplane
> > changes (we have a deassigned ioeventfd for a short period of time
> > during dataplane start, which leads to the virtqueue handler being
> > called in both the vcpu thread and the iothread simultaneously), I'd
> > like to see this in 2.6.
> >
> > Changes from RFC:
> > - Fixed some silly errors (checking for !disabled instead of disabled,
> > virtio_ccw_stop_ioeventfd() calling virtio_bus_start_ioeventfd()).
> > - Completely reworked set_host_notifier(): We only want to set/unset
> > the actual handler function and don't want to do anything to the
> > ioeventfd backing, so reduce the function to actually doing only
> > that.
> > - With the change above, we can lose the 'assign' parameter in
> > virtio_bus_stop_ioeventfd() again.
> > - Added more comments that hopefully make it clearer what is going on.
> >
> > I'd appreciate it if people could give it some testing; I'll be back
> > to look at the fallout after Easter.
> >
> > Cornelia Huck (6):
> > virtio-bus: common ioeventfd infrastructure
> > virtio-bus: have callers tolerate new host notifier api
> > virtio-ccw: convert to ioeventfd callbacks
> > virtio-pci: convert to ioeventfd callbacks
> > virtio-mmio: convert to ioeventfd callbacks
> > virtio-bus: remove old set_host_notifier callback
> >
> > hw/block/dataplane/virtio-blk.c | 6 +-
> > hw/s390x/virtio-ccw.c | 133 ++++++++++++++--------------------------
> > hw/scsi/virtio-scsi-dataplane.c | 9 ++-
> > hw/virtio/vhost.c | 13 ++--
> > hw/virtio/virtio-bus.c | 132 +++++++++++++++++++++++++++++++++++++++
> > hw/virtio/virtio-mmio.c | 128 +++++++++++++-------------------------
> > hw/virtio/virtio-pci.c | 124 +++++++++++++------------------------
> > include/hw/virtio/virtio-bus.h | 31 +++++++++-
> > 8 files changed, 303 insertions(+), 273 deletions(-)
> >
>
> FWIW, I went back to the old F20 installation. Without your patch set qemu crashes
> pretty soon, with your patch set it runs stable.
> How intrusive would it be to provide the fix 3 times (for each transport) and
> do the refactoring after 2.6? If the result would be big as well I think
> this patch set is still the right thing to do for 2.6 unless MST has a
> small and beautiful 2.6fix.
Definitely not beautiful, but small.
It's hard for me to see what is meant here, but if it's
even bigger than this patch then I'd be worried.
>
>
> Christian
^ permalink raw reply [flat|nested] 23+ messages in thread