From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org, Anthony Liguori <anthony@codemonkey.ws>,
gleb@redhat.com, Jason Wang <jasowang@redhat.com>,
Alex Williamson <alex.williamson@redhat.com>,
Jes Sorensen <Jes.Sorensen@redhat.com>,
Amit Shah <amit.shah@redhat.com>,
Christoph Hellwig <hch@infradead.org>,
armbru@redhat.com, kwolf@redhat.com
Subject: [Qemu-devel] [PATCH 3/3] vhost: roll our own cpu map variant
Date: Mon, 28 Mar 2011 23:14:27 +0200
Message-ID: <a9bae93a939257ae0c01d136aaebd9d488cff071.1301346785.git.mst@redhat.com>
In-Reply-To: <cover.1301346785.git.mst@redhat.com>
vhost used cpu_physical_memory_map to get the virtual address for the
ring; however, that function exits QEMU on an illegal RAM address.
Since the addresses are guest-controlled, we shouldn't do that.

Switch to our own variant that uses the vhost tables and returns an
error instead of exiting.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
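For review, a minimal, self-contained sketch (illustration only, not part
of the patch) of the translation the new vhost_memory_map() performs:
walk the memory table and turn a guest-physical address into a process
virtual address, returning NULL instead of exiting when no region covers
it. The struct below is a simplified stand-in for the relevant fields of
struct vhost_memory_region; the wrap-around check and size clamping done
by the real function are omitted.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for struct vhost_memory_region */
struct region {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
};

static void *translate(const struct region *regions, int nregions,
                       uint64_t addr)
{
    int i;
    for (i = 0; i < nregions; ++i) {
        const struct region *reg = &regions[i];
        /* skip regions that do not cover addr */
        if (addr < reg->guest_phys_addr ||
            addr - reg->guest_phys_addr >= reg->memory_size) {
            continue;
        }
        /* offset within the region maps 1:1 into the userspace mapping */
        return (void *)(uintptr_t)(reg->userspace_addr +
                                   (addr - reg->guest_phys_addr));
    }
    return NULL; /* unmapped guest address: fail the request, don't abort */
}

int main(void)
{
    struct region table[] = {
        { 0x0,      0x20000, 0x700000000000ull },
        { 0x100000, 0x40000, 0x700000100000ull },
    };
    printf("%p\n", translate(table, 2, 0x100010)); /* inside 2nd region */
    printf("%p\n", translate(table, 2, 0x900000)); /* unmapped -> (nil) */
    return 0;
}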
hw/vhost.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++------------
1 files changed, 52 insertions(+), 14 deletions(-)
diff --git a/hw/vhost.c b/hw/vhost.c
index c17a831..5fd09b5 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -271,6 +271,44 @@ static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size)
     dev->log_size = size;
 }
 
+/* Same as cpu_physical_memory_map but doesn't allocate,
+ * doesn't use a bounce buffer, checks input for errors such
+ * as wrap-around, and does not exit on failure. */
+static void *vhost_memory_map(struct vhost_dev *dev,
+                              uint64_t addr,
+                              uint64_t *size,
+                              int is_write)
+{
+    int i;
+    if (addr + *size < addr) {
+        *size = -addr;
+    }
+    for (i = 0; i < dev->mem->nregions; ++i) {
+        struct vhost_memory_region *reg = dev->mem->regions + i;
+        uint64_t rlast, mlast, userspace_addr;
+        if (!range_covers_byte(reg->guest_phys_addr, reg->memory_size, addr)) {
+            continue;
+        }
+        rlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
+        mlast = range_get_last(addr, *size);
+        if (rlast < mlast) {
+            *size -= (mlast - rlast);
+        }
+        userspace_addr = reg->userspace_addr + addr - reg->guest_phys_addr;
+        if ((unsigned long)userspace_addr != userspace_addr) {
+            return NULL;
+        }
+        return (void *)((unsigned long)userspace_addr);
+    }
+    return NULL;
+}
+
+/* Placeholder to keep the API consistent with cpu_physical_memory_unmap. */
+static void vhost_memory_unmap(void *buffer, uint64_t len,
+                               int is_write, uint64_t access_len)
+{
+}
+
 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                       uint64_t start_addr,
                                       uint64_t size)
@@ -285,7 +323,7 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
             continue;
         }
         l = vq->ring_size;
-        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
+        p = vhost_memory_map(dev, vq->ring_phys, &l, 1);
         if (!p || l != vq->ring_size) {
             virtio_error(dev->vdev, "Unable to map ring buffer for ring %d\n", i);
             return -ENOMEM;
@@ -294,7 +332,7 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
             virtio_error(dev->vdev, "Ring buffer relocated for ring %d\n", i);
             return -EBUSY;
         }
-        cpu_physical_memory_unmap(p, l, 0, 0);
+        vhost_memory_unmap(p, l, 0, 0);
     }
     return 0;
 }
@@ -480,21 +518,21 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
 
     s = l = virtio_queue_get_desc_size(vdev, idx);
     a = virtio_queue_get_desc_addr(vdev, idx);
-    vq->desc = cpu_physical_memory_map(a, &l, 0);
+    vq->desc = vhost_memory_map(dev, a, &l, 0);
     if (!vq->desc || l != s) {
         r = -ENOMEM;
         goto fail_alloc_desc;
     }
     s = l = virtio_queue_get_avail_size(vdev, idx);
     a = virtio_queue_get_avail_addr(vdev, idx);
-    vq->avail = cpu_physical_memory_map(a, &l, 0);
+    vq->avail = vhost_memory_map(dev, a, &l, 0);
     if (!vq->avail || l != s) {
         r = -ENOMEM;
         goto fail_alloc_avail;
     }
     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
-    vq->used = cpu_physical_memory_map(a, &l, 1);
+    vq->used = vhost_memory_map(dev, a, &l, 1);
     if (!vq->used || l != s) {
         r = -ENOMEM;
         goto fail_alloc_used;
@@ -502,7 +540,7 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
 
     vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
     vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
-    vq->ring = cpu_physical_memory_map(a, &l, 1);
+    vq->ring = vhost_memory_map(dev, a, &l, 1);
     if (!vq->ring || l != s) {
         r = -ENOMEM;
         goto fail_alloc_ring;
@@ -540,16 +578,16 @@ fail_kick:
     vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
 fail_host_notifier:
 fail_alloc:
-    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
+    vhost_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                               0, 0);
 fail_alloc_ring:
-    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
+    vhost_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                               0, 0);
 fail_alloc_used:
-    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
+    vhost_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                               0, 0);
 fail_alloc_avail:
-    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
+    vhost_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                               0, 0);
 fail_alloc_desc:
     return r;
@@ -577,13 +615,13 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
     }
     virtio_queue_set_last_avail_idx(vdev, idx, state.num);
     assert (r >= 0);
-    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
+    vhost_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                               0, virtio_queue_get_ring_size(vdev, idx));
-    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
+    vhost_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                               1, virtio_queue_get_used_size(vdev, idx));
-    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
+    vhost_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                               0, virtio_queue_get_avail_size(vdev, idx));
-    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
+    vhost_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                               0, virtio_queue_get_desc_size(vdev, idx));
 }
 
--
1.7.3.2.91.g446ac
Thread overview: 10+ messages
2011-03-28 21:13 [Qemu-devel] [PATCH 0/3] virtio and vhost error handling Michael S. Tsirkin
2011-03-28 21:14 ` [Qemu-devel] [PATCH 1/3] virtio: don't exit on guest errors Michael S. Tsirkin
2011-03-29 10:33 ` Amit Shah
2011-03-28 21:14 ` [Qemu-devel] [PATCH 2/3] vhost: don't exit on memory errors Michael S. Tsirkin
2011-03-28 21:14 ` Michael S. Tsirkin [this message]
2011-03-29 10:53 ` [Qemu-devel] [PATCH 3/3] vhost: roll our own cpu map variant Stefan Hajnoczi
2011-03-30 16:09 ` Michael S. Tsirkin
2011-03-30 16:26 ` Stefan Hajnoczi
2011-03-30 16:59 ` Michael S. Tsirkin
2011-03-30 17:59 ` Stefan Hajnoczi