* [Qemu-devel] [PATCH 0/3] virtio and vhost error handling
@ 2011-03-28 21:13 Michael S. Tsirkin
  2011-03-28 21:14 ` [Qemu-devel] [PATCH 1/3] virtio: don't exit on guest errors Michael S. Tsirkin
                   ` (2 more replies)
  0 siblings, 3 replies; 10+ messages in thread
From: Michael S. Tsirkin @ 2011-03-28 21:13 UTC (permalink / raw)
  To: qemu-devel, Anthony Liguori, gleb, Jason Wang, Alex Williamson,
	Jes Sorensen, Amit Shah, Christoph Hellwig, armbru, kwolf

This patchset makes virtio and vhost more robust
in the face of broken or malicious guests.

Lightly tested.

Please review.

Michael S. Tsirkin (3):
  virtio: don't exit on guest errors
  vhost: don't exit on memory errors
  vhost: roll our own cpu map variant

 hw/vhost.c             |   74 ++++++++++++++++++++++++++++++++++----------
 hw/vhost.h             |    1 +
 hw/virtio-blk.c        |   12 +++++--
 hw/virtio-serial-bus.c |   13 +++++--
 hw/virtio.c            |   79 +++++++++++++++++++++++++++++++-----------------
 hw/virtio.h            |    7 +++-
 6 files changed, 131 insertions(+), 55 deletions(-)

-- 
1.7.3.2.91.g446ac


* [Qemu-devel] [PATCH 1/3] virtio: don't exit on guest errors
  2011-03-28 21:13 [Qemu-devel] [PATCH 0/3] virtio and vhost error handling Michael S. Tsirkin
@ 2011-03-28 21:14 ` Michael S. Tsirkin
  2011-03-29 10:33   ` Amit Shah
  2011-03-28 21:14 ` [Qemu-devel] [PATCH 2/3] vhost: don't exit on memory errors Michael S. Tsirkin
  2011-03-28 21:14 ` [Qemu-devel] [PATCH 3/3] vhost: roll our own cpu map variant Michael S. Tsirkin
  2 siblings, 1 reply; 10+ messages in thread
From: Michael S. Tsirkin @ 2011-03-28 21:14 UTC (permalink / raw)
  To: qemu-devel, Anthony Liguori, gleb, Jason Wang, Alex Williamson,
	Jes Sorensen, Amit Shah, Christoph Hellwig, armbru, kwolf

When the guest does something illegal, such as
programming invalid index values into the virtio
device, qemu currently tends to crash.

With virtio, a better approach is to log an error
and set the device status to FAILED, which stops the device.

Add an API to do this, and fix the core, blk and serial
code to use it on error.
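
An illustrative sketch of the intended pattern (not part of the patch;
the handler name is made up, virtio_error() and virtqueue_pop() are the
APIs from the diff below, virtqueue_push()/virtio_notify() are the
existing queue helpers):

static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        if (elem.out_num < 1) {
            /* Log the guest error and set the FAILED status bit;
             * no fprintf(stderr, ...), no exit(1). */
            virtio_error(vdev, "request without an out buffer");
            return;
        }
        /* ... process the request ... */
        virtqueue_push(vq, &elem, 0);
    }
    virtio_notify(vdev, vq);
}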

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/virtio-blk.c        |   12 +++++--
 hw/virtio-serial-bus.c |   13 +++++--
 hw/virtio.c            |   79 +++++++++++++++++++++++++++++++-----------------
 hw/virtio.h            |    7 +++-
 4 files changed, 73 insertions(+), 38 deletions(-)

diff --git a/hw/virtio-blk.c b/hw/virtio-blk.c
index cff21a9..fdaaf89 100644
--- a/hw/virtio-blk.c
+++ b/hw/virtio-blk.c
@@ -502,10 +502,14 @@ static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
         req->next = s->rq;
         s->rq = req;
 
-        virtqueue_map_sg(req->elem.in_sg, req->elem.in_addr,
-            req->elem.in_num, 1);
-        virtqueue_map_sg(req->elem.out_sg, req->elem.out_addr,
-            req->elem.out_num, 0);
+        if (virtqueue_map_sg(s->vq, req->elem.in_sg, req->elem.in_addr,
+                             req->elem.in_num, 1)) {
+            return -EINVAL;
+        }
+        if (virtqueue_map_sg(s->vq, req->elem.out_sg, req->elem.out_addr,
+                             req->elem.out_num, 0)) {
+            return -EINVAL;
+        }
     }
 
     return 0;
diff --git a/hw/virtio-serial-bus.c b/hw/virtio-serial-bus.c
index 7ae2b0d..8807a2f 100644
--- a/hw/virtio-serial-bus.c
+++ b/hw/virtio-serial-bus.c
@@ -679,10 +679,15 @@ static int virtio_serial_load(QEMUFile *f, void *opaque, int version_id)
 
                 qemu_get_buffer(f, (unsigned char *)&port->elem,
                                 sizeof(port->elem));
-                virtqueue_map_sg(port->elem.in_sg, port->elem.in_addr,
-                                 port->elem.in_num, 1);
-                virtqueue_map_sg(port->elem.out_sg, port->elem.out_addr,
-                                 port->elem.out_num, 1);
+                if (virtqueue_map_sg(port->ivq, port->elem.in_sg,
+                                     port->elem.in_addr,
+                                     port->elem.in_num, 1)) {
+                    return -EINVAL;
+                }
+                if (virtqueue_map_sg(port->ovq, port->elem.out_sg, port->elem.out_addr,
+                                 port->elem.out_num, 1)) {
+                    return -EINVAL;
+                }
 
                 /*
                  *  Port was throttled on source machine.  Let's
diff --git a/hw/virtio.c b/hw/virtio.c
index d5013b6..d1c8769 100644
--- a/hw/virtio.c
+++ b/hw/virtio.c
@@ -16,6 +16,7 @@
 #include "trace.h"
 #include "virtio.h"
 #include "sysemu.h"
+#include "qemu-error.h"
 
 /* The alignment to use between consumer and producer parts of vring.
  * x86 pagesize again. */
@@ -253,15 +254,15 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
 
     /* Check it isn't doing very strange things with descriptor numbers. */
     if (num_heads > vq->vring.num) {
-        fprintf(stderr, "Guest moved used index from %u to %u",
-                idx, vring_avail_idx(vq));
-        exit(1);
+        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
+                     idx, vring_avail_idx(vq));
+        return 0;
     }
 
     return num_heads;
 }
 
-static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
+static int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
 {
     unsigned int head;
 
@@ -271,14 +272,14 @@ static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
 
     /* If their number is silly, that's a fatal mistake. */
     if (head >= vq->vring.num) {
-        fprintf(stderr, "Guest says index %u is available", head);
-        exit(1);
+        virtio_error(vq->vdev, "Guest says index %u is available", head);
+        return -1;
     }
 
     return head;
 }
 
-static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
+static unsigned virtqueue_next_desc(VirtQueue *vq, target_phys_addr_t desc_pa,
                                     unsigned int i, unsigned int max)
 {
     unsigned int next;
@@ -293,8 +294,8 @@ static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
     wmb();
 
     if (next >= max) {
-        fprintf(stderr, "Desc next is %u", next);
-        exit(1);
+        virtio_error(vq->vdev, "Desc next is %u", next);
+        return max;
     }
 
     return next;
@@ -316,18 +317,21 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
         max = vq->vring.num;
         num_bufs = total_bufs;
         i = virtqueue_get_head(vq, idx++);
+        if (i < 0) {
+            return 0;
+        }
         desc_pa = vq->vring.desc;
 
         if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
             if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
-                fprintf(stderr, "Invalid size for indirect buffer table\n");
-                exit(1);
+                virtio_error(vq->vdev, "Invalid size for indirect buffer table\n");
+                return 0;
             }
 
             /* If we've got too many, that implies a descriptor loop. */
             if (num_bufs >= max) {
-                fprintf(stderr, "Looped descriptor");
-                exit(1);
+                virtio_error(vq->vdev, "Looped descriptor");
+                return 0;
             }
 
             /* loop over the indirect descriptor table */
@@ -340,8 +344,8 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
         do {
             /* If we've got too many, that implies a descriptor loop. */
             if (++num_bufs > max) {
-                fprintf(stderr, "Looped descriptor");
-                exit(1);
+                virtio_error(vq->vdev, "Looped descriptor");
+                return 0;
             }
 
             if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
@@ -353,7 +357,7 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
                     (out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
                     return 1;
             }
-        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);
+        } while ((i = virtqueue_next_desc(vq, desc_pa, i, max)) != max);
 
         if (!indirect)
             total_bufs = num_bufs;
@@ -364,8 +368,8 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
     return 0;
 }
 
-void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
-    size_t num_sg, int is_write)
+int virtqueue_map_sg(VirtQueue *vq, struct iovec *sg,
+                     target_phys_addr_t *addr, size_t num_sg, int is_write)
 {
     unsigned int i;
     target_phys_addr_t len;
@@ -374,15 +378,16 @@ void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
         len = sg[i].iov_len;
         sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
         if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
-            fprintf(stderr, "virtio: trying to map MMIO memory\n");
-            exit(1);
+            return -1;
         }
     }
+    return 0;
 }
 
 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
 {
-    unsigned int i, head, max;
+    int i, head;
+    unsigned max;
     target_phys_addr_t desc_pa = vq->vring.desc;
 
     if (!virtqueue_num_heads(vq, vq->last_avail_idx))
@@ -394,11 +399,14 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
     max = vq->vring.num;
 
     i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
+    if (i < 0) {
+        return 0;
+    }
 
     if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
         if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
-            fprintf(stderr, "Invalid size for indirect buffer table\n");
-            exit(1);
+            virtio_error(vq->vdev, "Invalid size for indirect buffer table\n");
+            return 0;
         }
 
         /* loop over the indirect descriptor table */
@@ -423,14 +431,18 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
 
         /* If we've got too many, that implies a descriptor loop. */
         if ((elem->in_num + elem->out_num) > max) {
-            fprintf(stderr, "Looped descriptor");
-            exit(1);
+            virtio_error(vq->vdev, "Looped descriptor");
+            return 0;
         }
-    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);
+    } while ((i = virtqueue_next_desc(vq, desc_pa, i, max)) != max);
 
     /* Now map what we have collected */
-    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
-    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);
+    if (virtqueue_map_sg(vq, elem->in_sg, elem->in_addr, elem->in_num, 1)) {
+        return 0;
+    }
+    if (virtqueue_map_sg(vq, elem->out_sg, elem->out_addr, elem->out_num, 0)) {
+        return 0;
+    }
 
     elem->index = head;
 
@@ -863,3 +875,14 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
 {
     return &vq->host_notifier;
 }
+
+void virtio_error(VirtIODevice *vdev, const char *fmt, ...)
+{
+    va_list ap;
+
+    virtio_set_status(vdev, VIRTIO_CONFIG_S_FAILED);
+
+    va_start(ap, fmt);
+    error_vprintf(fmt, ap);
+    va_end(ap);
+}
diff --git a/hw/virtio.h b/hw/virtio.h
index 6d3d960..9771a9c 100644
--- a/hw/virtio.h
+++ b/hw/virtio.h
@@ -134,6 +134,9 @@ static inline void virtio_set_status(VirtIODevice *vdev, uint8_t val)
     vdev->status = val;
 }
 
+void virtio_error(VirtIODevice *vdev, const char *fmt, ...)
+    __attribute__ ((format(printf, 2, 3)));
+
 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                             void (*handle_output)(VirtIODevice *,
                                                   VirtQueue *));
@@ -144,8 +147,8 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count);
 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len, unsigned int idx);
 
-void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
-    size_t num_sg, int is_write);
+int virtqueue_map_sg(VirtQueue *vq, struct iovec *sg,
+                     target_phys_addr_t *addr, size_t num_sg, int is_write);
 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
 int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes);
 
-- 
1.7.3.2.91.g446ac


* [Qemu-devel] [PATCH 2/3] vhost: don't exit on memory errors
  2011-03-28 21:13 [Qemu-devel] [PATCH 0/3] virtio and vhost error handling Michael S. Tsirkin
  2011-03-28 21:14 ` [Qemu-devel] [PATCH 1/3] virtio: don't exit on guest errors Michael S. Tsirkin
@ 2011-03-28 21:14 ` Michael S. Tsirkin
  2011-03-28 21:14 ` [Qemu-devel] [PATCH 3/3] vhost: roll our own cpu map variant Michael S. Tsirkin
  2 siblings, 0 replies; 10+ messages in thread
From: Michael S. Tsirkin @ 2011-03-28 21:14 UTC (permalink / raw)
  To: qemu-devel, Anthony Liguori, gleb, Jason Wang, Alex Williamson,
	Jes Sorensen, Amit Shah, Christoph Hellwig, armbru, kwolf

When memory that contains one of the VQs goes
away, handle that as a guest error instead of
exiting qemu.
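
Note that virtio_error(dev->vdev, ...) relies on the new vdev back-pointer
added to struct vhost_dev, so that pointer has to be filled in before the
device starts.  A minimal sketch of the kind of assignment needed (the
helper name and its placement are assumptions, not shown in the hunks
below):

/* Sketch only: remember which VirtIODevice this vhost_dev drives, so
 * later guest errors can be reported through virtio_error(). */
static void vhost_dev_set_vdev(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    hdev->vdev = vdev;
}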

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/vhost.c |    8 +++++---
 hw/vhost.h |    1 +
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/hw/vhost.c b/hw/vhost.c
index 97a1299..c17a831 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -287,11 +287,11 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
         l = vq->ring_size;
         p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
         if (!p || l != vq->ring_size) {
-            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
+            virtio_error(dev->vdev, "Unable to map ring buffer for ring %d\n", i);
             return -ENOMEM;
         }
         if (p != vq->ring) {
-            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
+            virtio_error(dev->vdev, "Ring buffer relocated for ring %d\n", i);
             return -EBUSY;
         }
         cpu_physical_memory_unmap(p, l, 0, 0);
@@ -330,7 +330,9 @@ static void vhost_client_set_memory(CPUPhysMemoryClient *client,
 
     if (dev->started) {
         r = vhost_verify_ring_mappings(dev, start_addr, size);
-        assert(r >= 0);
+        if (r < 0) {
+            return;
+        }
     }
 
     if (!dev->log_enabled) {
diff --git a/hw/vhost.h b/hw/vhost.h
index c8c595a..90b5bc8 100644
--- a/hw/vhost.h
+++ b/hw/vhost.h
@@ -39,6 +39,7 @@ struct vhost_dev {
     vhost_log_chunk_t *log;
     unsigned long long log_size;
     bool force;
+    VirtIODevice *vdev;
 };
 
 int vhost_dev_init(struct vhost_dev *hdev, int devfd, bool force);
-- 
1.7.3.2.91.g446ac


* [Qemu-devel] [PATCH 3/3] vhost: roll our own cpu map variant
  2011-03-28 21:13 [Qemu-devel] [PATCH 0/3] virtio and vhost error handling Michael S. Tsirkin
  2011-03-28 21:14 ` [Qemu-devel] [PATCH 1/3] virtio: don't exit on guest errors Michael S. Tsirkin
  2011-03-28 21:14 ` [Qemu-devel] [PATCH 2/3] vhost: don't exit on memory errors Michael S. Tsirkin
@ 2011-03-28 21:14 ` Michael S. Tsirkin
  2011-03-29 10:53   ` Stefan Hajnoczi
  2 siblings, 1 reply; 10+ messages in thread
From: Michael S. Tsirkin @ 2011-03-28 21:14 UTC (permalink / raw)
  To: qemu-devel, Anthony Liguori, gleb, Jason Wang, Alex Williamson,
	Jes Sorensen, Amit Shah, Christoph Hellwig, armbru, kwolf

vhost used cpu_physical_memory_map to get the
virtual address for the ring; however, that
will exit qemu on an illegal RAM address.
Since the addresses are guest-controlled, we
shouldn't do that.

Switch to our own variant that uses the vhost
tables and returns an error instead of exiting.
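
One non-obvious detail in the new map function below is the clamp against
address wrap-around.  A standalone illustration of that check (plain C
with made-up names, not part of the patch):

#include <stdint.h>
#include <assert.h>

/* Clamp *size so that [addr, addr + *size) does not wrap the 64-bit
 * space, mirroring the first check in vhost_memory_map() below. */
static void clamp_no_wrap(uint64_t addr, uint64_t *size)
{
    if (addr + *size < addr) {
        *size = -addr;   /* bytes remaining up to the end of the space */
    }
}

int main(void)
{
    uint64_t size = UINT64_MAX;          /* a range that would wrap */
    clamp_no_wrap(0xffffffffffff0000ULL, &size);
    assert(size == 0x10000);             /* clamped to what is left */
    return 0;
}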

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/vhost.c |   66 +++++++++++++++++++++++++++++++++++++++++++++++------------
 1 files changed, 52 insertions(+), 14 deletions(-)

diff --git a/hw/vhost.c b/hw/vhost.c
index c17a831..5fd09b5 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -271,6 +271,44 @@ static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size)
     dev->log_size = size;
 }
 
+/* Same as cpu_physical_memory_map but doesn't allocate,
+ * doesn't use a bounce buffer, checks input for errors such
+ * as wrap-around, and does not exit on failure. */
+static void *vhost_memory_map(struct vhost_dev *dev,
+                              uint64_t addr,
+                              uint64_t *size,
+                              int is_write)
+{
+    int i;
+    if (addr + *size < addr) {
+        *size = -addr;
+    }
+    for (i = 0; i < dev->mem->nregions; ++i) {
+        struct vhost_memory_region *reg = dev->mem->regions + i;
+        uint64_t rlast, mlast, userspace_addr;
+        if (!range_covers_byte(reg->guest_phys_addr, reg->memory_size, addr)) {
+            continue;
+        }
+        rlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
+        mlast = range_get_last(addr, *size);
+        if (rlast < mlast) {
+            *size -= (mlast - rlast);
+        }
+        userspace_addr = reg->userspace_addr + addr - reg->guest_phys_addr;
+        if ((unsigned long)userspace_addr != userspace_addr) {
+            return NULL;
+        }
+        return (void *)((unsigned long)userspace_addr);
+    }
+    return NULL;
+}
+
+/* Placeholder to keep the API consistent with cpu_physical_memory_unmap. */
+static void vhost_memory_unmap(void *buffer, uint64_t len,
+                               int is_write, uint64_t access_len)
+{
+}
+
 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                       uint64_t start_addr,
                                       uint64_t size)
@@ -285,7 +323,7 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
             continue;
         }
         l = vq->ring_size;
-        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
+        p = vhost_memory_map(dev, vq->ring_phys, &l, 1);
         if (!p || l != vq->ring_size) {
             virtio_error(dev->vdev, "Unable to map ring buffer for ring %d\n", i);
             return -ENOMEM;
@@ -294,7 +332,7 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
             virtio_error(dev->vdev, "Ring buffer relocated for ring %d\n", i);
             return -EBUSY;
         }
-        cpu_physical_memory_unmap(p, l, 0, 0);
+        vhost_memory_unmap(p, l, 0, 0);
     }
     return 0;
 }
@@ -480,21 +518,21 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
 
     s = l = virtio_queue_get_desc_size(vdev, idx);
     a = virtio_queue_get_desc_addr(vdev, idx);
-    vq->desc = cpu_physical_memory_map(a, &l, 0);
+    vq->desc = vhost_memory_map(dev, a, &l, 0);
     if (!vq->desc || l != s) {
         r = -ENOMEM;
         goto fail_alloc_desc;
     }
     s = l = virtio_queue_get_avail_size(vdev, idx);
     a = virtio_queue_get_avail_addr(vdev, idx);
-    vq->avail = cpu_physical_memory_map(a, &l, 0);
+    vq->avail = vhost_memory_map(dev, a, &l, 0);
     if (!vq->avail || l != s) {
         r = -ENOMEM;
         goto fail_alloc_avail;
     }
     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
-    vq->used = cpu_physical_memory_map(a, &l, 1);
+    vq->used = vhost_memory_map(dev, a, &l, 1);
     if (!vq->used || l != s) {
         r = -ENOMEM;
         goto fail_alloc_used;
@@ -502,7 +540,7 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
 
     vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
     vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
-    vq->ring = cpu_physical_memory_map(a, &l, 1);
+    vq->ring = vhost_memory_map(dev, a, &l, 1);
     if (!vq->ring || l != s) {
         r = -ENOMEM;
         goto fail_alloc_ring;
@@ -540,16 +578,16 @@ fail_kick:
     vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
 fail_host_notifier:
 fail_alloc:
-    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
+    vhost_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                               0, 0);
 fail_alloc_ring:
-    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
+    vhost_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                               0, 0);
 fail_alloc_used:
-    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
+    vhost_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                               0, 0);
 fail_alloc_avail:
-    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
+    vhost_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                               0, 0);
 fail_alloc_desc:
     return r;
@@ -577,13 +615,13 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
     }
     virtio_queue_set_last_avail_idx(vdev, idx, state.num);
     assert (r >= 0);
-    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
+    vhost_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                               0, virtio_queue_get_ring_size(vdev, idx));
-    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
+    vhost_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                               1, virtio_queue_get_used_size(vdev, idx));
-    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
+    vhost_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                               0, virtio_queue_get_avail_size(vdev, idx));
-    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
+    vhost_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                               0, virtio_queue_get_desc_size(vdev, idx));
 }
 
-- 
1.7.3.2.91.g446ac


* Re: [Qemu-devel] [PATCH 1/3] virtio: don't exit on guest errors
  2011-03-28 21:14 ` [Qemu-devel] [PATCH 1/3] virtio: don't exit on guest errors Michael S. Tsirkin
@ 2011-03-29 10:33   ` Amit Shah
  0 siblings, 0 replies; 10+ messages in thread
From: Amit Shah @ 2011-03-29 10:33 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: kwolf, gleb, Jes Sorensen, Jason Wang, qemu-devel, armbru,
	Christoph Hellwig, Alex Williamson

On (Mon) 28 Mar 2011 [23:14:16], Michael S. Tsirkin wrote:
> When guest does something illegal, such as
> programming invalid index values in the virtio
> device, qemu currently tends to crash.
> 
> With virtio, a better idea is to log an error,
> and set status to FAIL which stops the device.
> 
> Add an API to do this, and fix core, blk and serial
> to use it on error.
> 
> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> ---
>  hw/virtio-blk.c        |   12 +++++--
>  hw/virtio-serial-bus.c |   13 +++++--
>  hw/virtio.c            |   79 +++++++++++++++++++++++++++++++-----------------
>  hw/virtio.h            |    7 +++-
>  4 files changed, 73 insertions(+), 38 deletions(-)

ACK

		Amit


* Re: [Qemu-devel] [PATCH 3/3] vhost: roll our own cpu map variant
  2011-03-28 21:14 ` [Qemu-devel] [PATCH 3/3] vhost: roll our own cpu map variant Michael S. Tsirkin
@ 2011-03-29 10:53   ` Stefan Hajnoczi
  2011-03-30 16:09     ` Michael S. Tsirkin
  0 siblings, 1 reply; 10+ messages in thread
From: Stefan Hajnoczi @ 2011-03-29 10:53 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: kwolf, gleb, Jes Sorensen, Jason Wang, qemu-devel, armbru,
	Christoph Hellwig, Alex Williamson, Amit Shah

On Mon, Mar 28, 2011 at 10:14 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> vhost used cpu_physical_memory_map to get the
> virtual address for the ring, however,
> this will exit on an illegal RAM address.
> Since the addresses are guest-controlled, we
> shouldn't do that.
>
> Switch to our own variant that uses the vhost
> tables and returns an error instead of exiting.

We should make all of QEMU more robust instead of just vhost.  Perhaps
introduce cpu_physical_memory_map_nofail(...) that aborts like the
current cpu_physical_memory_map() implementation and then make non-hw/
users call that one.  hw/ users should check for failure.
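
A rough sketch of what that split could look like (hypothetical: the
_nofail name is taken from the suggestion above, and it assumes
cpu_physical_memory_map() is first changed to return NULL on failure
instead of exiting):

static void *cpu_physical_memory_map_nofail(target_phys_addr_t addr,
                                            target_phys_addr_t *plen,
                                            int is_write)
{
    void *p = cpu_physical_memory_map(addr, plen, is_write);
    if (!p) {
        /* Preserve today's fatal behaviour for non-hw/ callers. */
        fprintf(stderr, "qemu: failed to map physical memory\n");
        abort();
    }
    return p;
}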

Stefan


* Re: [Qemu-devel] [PATCH 3/3] vhost: roll our own cpu map variant
  2011-03-29 10:53   ` Stefan Hajnoczi
@ 2011-03-30 16:09     ` Michael S. Tsirkin
  2011-03-30 16:26       ` Stefan Hajnoczi
  0 siblings, 1 reply; 10+ messages in thread
From: Michael S. Tsirkin @ 2011-03-30 16:09 UTC (permalink / raw)
  To: Stefan Hajnoczi
  Cc: kwolf, gleb, Jes Sorensen, Jason Wang, qemu-devel, armbru,
	Christoph Hellwig, Alex Williamson, Amit Shah

On Tue, Mar 29, 2011 at 11:53:54AM +0100, Stefan Hajnoczi wrote:
> On Mon, Mar 28, 2011 at 10:14 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > vhost used cpu_physical_memory_map to get the
> > virtual address for the ring, however,
> > this will exit on an illegal RAM address.
> > Since the addresses are guest-controlled, we
> > shouldn't do that.
> >
> > Switch to our own variant that uses the vhost
> > tables and returns an error instead of exiting.
> 
> We should make all of QEMU more robust instead of just vhost.  Perhaps
> introduce cpu_physical_memory_map_nofail(...) that aborts like the
> current cpu_physical_memory_map() implementation and then make non-hw/
> users call that one.  hw/ users should check for failure.
> 
> Stefan

Yea, well ... at least vhost-net wants to also check
it is given a ram address, not some other physical address.
We could generally replace the memory management in vhost-net
by some other logic, when that's done this one can
go away as well.

-- 
MST


* Re: [Qemu-devel] [PATCH 3/3] vhost: roll our own cpu map variant
  2011-03-30 16:09     ` Michael S. Tsirkin
@ 2011-03-30 16:26       ` Stefan Hajnoczi
  2011-03-30 16:59         ` Michael S. Tsirkin
  0 siblings, 1 reply; 10+ messages in thread
From: Stefan Hajnoczi @ 2011-03-30 16:26 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: kwolf, gleb, Jes Sorensen, Jason Wang, qemu-devel, armbru,
	Christoph Hellwig, Alex Williamson, Amit Shah

On Wed, Mar 30, 2011 at 5:09 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Tue, Mar 29, 2011 at 11:53:54AM +0100, Stefan Hajnoczi wrote:
>> On Mon, Mar 28, 2011 at 10:14 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>> > vhost used cpu_physical_memory_map to get the
>> > virtual address for the ring, however,
>> > this will exit on an illegal RAM address.
>> > Since the addresses are guest-controlled, we
>> > shouldn't do that.
>> >
>> > Switch to our own variant that uses the vhost
>> > tables and returns an error instead of exiting.
>>
>> We should make all of QEMU more robust instead of just vhost.  Perhaps
>> introduce cpu_physical_memory_map_nofail(...) that aborts like the
>> current cpu_physical_memory_map() implementation and then make non-hw/
>> users call that one.  hw/ users should check for failure.
>>
>> Stefan
>
> Yea, well ... at least vhost-net wants to also check
> it is given a ram address, not some other physical address.
> We could generally replace the memory management in vhost-net
> by some other logic, when that's done this one can
> go away as well.

Sounds like you do not want to refactor physical memory access for
non-vhost.  Fair enough but we have to do it sooner or later in order
to make all of QEMU more robust.  If vhost-net is protected but the
IDE CD-ROM and virtio-blk disk still have issues then we haven't
reached our goal yet.  Any way I can convince you to do a generic API?
:)

Stefan


* Re: [Qemu-devel] [PATCH 3/3] vhost: roll our own cpu map variant
  2011-03-30 16:26       ` Stefan Hajnoczi
@ 2011-03-30 16:59         ` Michael S. Tsirkin
  2011-03-30 17:59           ` Stefan Hajnoczi
  0 siblings, 1 reply; 10+ messages in thread
From: Michael S. Tsirkin @ 2011-03-30 16:59 UTC (permalink / raw)
  To: Stefan Hajnoczi
  Cc: kwolf, gleb, Jes Sorensen, Jason Wang, qemu-devel, armbru,
	Christoph Hellwig, Alex Williamson, Amit Shah

On Wed, Mar 30, 2011 at 05:26:22PM +0100, Stefan Hajnoczi wrote:
> On Wed, Mar 30, 2011 at 5:09 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > On Tue, Mar 29, 2011 at 11:53:54AM +0100, Stefan Hajnoczi wrote:
> >> On Mon, Mar 28, 2011 at 10:14 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> >> > vhost used cpu_physical_memory_map to get the
> >> > virtual address for the ring, however,
> >> > this will exit on an illegal RAM address.
> >> > Since the addresses are guest-controlled, we
> >> > shouldn't do that.
> >> >
> >> > Switch to our own variant that uses the vhost
> >> > tables and returns an error instead of exiting.
> >>
> >> We should make all of QEMU more robust instead of just vhost.  Perhaps
> >> introduce cpu_physical_memory_map_nofail(...) that aborts like the
> >> current cpu_physical_memory_map() implementation and then make non-hw/
> >> users call that one.  hw/ users should check for failure.
> >>
> >> Stefan
> >
> > Yea, well ... at least vhost-net wants to also check
> > it is given a ram address, not some other physical address.
> > We could generally replace the memory management in vhost-net
> > by some other logic, when that's done this one can
> > go away as well.
> 
> Sounds like you do not want to refactor physical memory access for
> non-vhost.  Fair enough but we have to do it sooner or later in order
> to make all of QEMU more robust.  If vhost-net is protected but the
> IDE CD-ROM and virtio-blk disk still have issues then we haven't
> reached our goal yet.  Any way I can convince you to do a generic API?
> :)
> 
> Stefan

If you are talking about splitting real ram from non ram
and creating a generic API for that, you don't need to convince me,
but I can't commit to implementing it right now.

-- 
MST


* Re: [Qemu-devel] [PATCH 3/3] vhost: roll our own cpu map variant
  2011-03-30 16:59         ` Michael S. Tsirkin
@ 2011-03-30 17:59           ` Stefan Hajnoczi
  0 siblings, 0 replies; 10+ messages in thread
From: Stefan Hajnoczi @ 2011-03-30 17:59 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: kwolf, gleb, Jes Sorensen, Jason Wang, qemu-devel, armbru,
	Christoph Hellwig, Alex Williamson, Amit Shah

On Wed, Mar 30, 2011 at 5:59 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Wed, Mar 30, 2011 at 05:26:22PM +0100, Stefan Hajnoczi wrote:
>> On Wed, Mar 30, 2011 at 5:09 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>> > On Tue, Mar 29, 2011 at 11:53:54AM +0100, Stefan Hajnoczi wrote:
>> >> On Mon, Mar 28, 2011 at 10:14 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>> >> > vhost used cpu_physical_memory_map to get the
>> >> > virtual address for the ring, however,
>> >> > this will exit on an illegal RAM address.
>> >> > Since the addresses are guest-controlled, we
>> >> > shouldn't do that.
>> >> >
>> >> > Switch to our own variant that uses the vhost
>> >> > tables and returns an error instead of exiting.
>> >>
>> >> We should make all of QEMU more robust instead of just vhost.  Perhaps
>> >> introduce cpu_physical_memory_map_nofail(...) that aborts like the
>> >> current cpu_physical_memory_map() implementation and then make non-hw/
>> >> users call that one.  hw/ users should check for failure.
>> >>
>> >> Stefan
>> >
>> > Yea, well ... at least vhost-net wants to also check
>> > it is given a ram address, not some other physical address.
>> > We could generally replace the memory management in vhost-net
>> > by some other logic, when that's done this one can
>> > go away as well.
>>
>> Sounds like you do not want to refactor physical memory access for
>> non-vhost.  Fair enough but we have to do it sooner or later in order
>> to make all of QEMU more robust.  If vhost-net is protected but the
>> IDE CD-ROM and virtio-blk disk still have issues then we haven't
>> reached our goal yet.  Any way I can convince you to do a generic API?
>> :)
>>
>> Stefan
>
> If you are talking about splitting real ram from non ram
> and creating a generic API for that, you don't need to convince me,
> but I can't commit to implementing it right now.

Okay, userspace virtio will be able to use it as well in the future.

Stefan


