qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Anthony Liguori <anthony@codemonkey.ws>
To: "Michael S. Tsirkin" <mst@redhat.com>
Cc: qemu-devel@nongnu.org
Subject: [Qemu-devel] Re: [PATCH RFC] vhost: ring: verify ring is not being moved
Date: Tue, 02 Mar 2010 10:59:09 -0600	[thread overview]
Message-ID: <4B8D43DD.8080302@codemonkey.ws> (raw)
In-Reply-To: <20100302165434.GA8690@redhat.com>

On 03/02/2010 10:54 AM, Michael S. Tsirkin wrote:
> abort if it is
>
> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> ---
>
> So the following is a simple solution for unstable
> ring mappings security issue: simply detect this and stop.
>
> Will repost series with this later after some testing,
> but this is an RFC to get early feedback if any.
>    

It's certainly a reasonable compromise.

Regards,

Anthony Liguori

>   hw/vhost.c |   52 +++++++++++++++++++++++++++++++++++++++++++++++++---
>   hw/vhost.h |    3 +++
>   2 files changed, 52 insertions(+), 3 deletions(-)
>
> diff --git a/hw/vhost.c b/hw/vhost.c
> index 3b3a109..b9e115e 100644
> --- a/hw/vhost.c
> +++ b/hw/vhost.c
> @@ -256,6 +256,33 @@ static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size)
>       dev->log_size = size;
>   }
>
> +static int vhost_verify_ring_mappings(struct vhost_dev *dev,
> +                                      uint64_t start_addr,
> +                                      uint64_t size)
> +{
> +    int i;
> +    for (i = 0; i < dev->nvqs; ++i) {
> +        struct vhost_virtqueue *vq = dev->vqs + i;
> +        target_phys_addr_t l;
> +        void *p;
> +
> +        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size))
> +            continue;
> +        l = vq->ring_size;
> +        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
> +        if (!p || l != vq->ring_size) {
> +            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
> +            return -ENOMEM;
> +        }
> +        if (p != vq->ring) {
> +            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
> +            return -EBUSY;
> +        }
> +        cpu_physical_memory_unmap(p, l, 0, 0);
> +    }
> +    return 0;
> +}
> +
>   static void vhost_client_set_memory(CPUPhysMemoryClient *client,
>                                       target_phys_addr_t start_addr,
>                                       ram_addr_t size,
> @@ -284,6 +311,12 @@ static void vhost_client_set_memory(CPUPhysMemoryClient *client,
>       if (!dev->started) {
>           return;
>       }
> +
> +    if (dev->started) {
> +        r = vhost_verify_ring_mappings(dev, start_addr, size);
> +        assert(r >= 0);
> +    }
> +
>       if (!dev->log_enabled) {
>           r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
>           assert(r >= 0);
> @@ -442,6 +475,14 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
>           goto fail_alloc_used;
>       }
>
> +    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
> +    vq->ring_phys = a = virtio_queue_get_ring(vdev, idx);
> +    vq->ring = cpu_physical_memory_map(a, &l, 1);
> +    if (!vq->ring || l != s) {
> +        r = -ENOMEM;
> +        goto fail_alloc_ring;
> +    }
> +
>       r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
> +    if (r < 0) {
>           r = -errno;
> @@ -485,6 +526,9 @@ fail_host_notifier:
>       vdev->binding->guest_notifier(vdev->binding_opaque, idx, false);
>   fail_guest_notifier:
>   fail_alloc:
> +    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
> +                              0, 0);
> +fail_alloc_ring:
>       cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
>                                 0, 0);
>   fail_alloc_used:
> @@ -526,12 +570,14 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
>       }
>       virtio_queue_set_last_avail_idx(vdev, idx, state.num);
>       assert (r >= 0);
> +    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
> +                              0, virtio_queue_get_ring_size(vdev, idx));
>       cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
>                                 0, 0);
>       cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
>                                 0, 0);
>       cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
>                                 0, 0);
>   }
>
>   int vhost_dev_init(struct vhost_dev *hdev, int devfd)
> diff --git a/hw/vhost.h b/hw/vhost.h
> index 48b52c7..86dd834 100644
> --- a/hw/vhost.h
> +++ b/hw/vhost.h
> @@ -14,6 +14,9 @@ struct vhost_virtqueue {
>       int num;
>       unsigned long long used_phys;
>       unsigned used_size;
> +    void *ring;
> +    unsigned long long ring_phys;
> +    unsigned ring_size;
>   };
>
>   typedef unsigned long vhost_log_chunk_t;
>    

      reply	other threads:[~2010-03-02 16:59 UTC|newest]

Thread overview: 2+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2010-03-02 16:54 [Qemu-devel] [PATCH RFC] vhost: ring: verify ring is not being moved Michael S. Tsirkin
2010-03-02 16:59 ` Anthony Liguori [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=4B8D43DD.8080302@codemonkey.ws \
    --to=anthony@codemonkey.ws \
    --cc=mst@redhat.com \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).