From: Jason Wang <jasowang@redhat.com>
To: Cornelia Huck <cornelia.huck@de.ibm.com>
Cc: mst@redhat.com, qemu-devel@nongnu.org, pbonzini@redhat.com,
peterx@redhat.com
Subject: Re: [Qemu-devel] [PATCH] virtio: destroy region cache during reset
Date: Wed, 8 Mar 2017 11:18:27 +0800
Message-ID: <cb84b888-4917-162d-31a5-f708f4783444@redhat.com>
In-Reply-To: <20170307111618.43ffbd13.cornelia.huck@de.ibm.com>
On 03/07/2017 18:16, Cornelia Huck wrote:
> On Tue, 7 Mar 2017 16:47:58 +0800
> Jason Wang <jasowang@redhat.com> wrote:
>
>> We don't destroy the region cache during reset, which can leak the
>> mappings of the previous driver to a buggy or malicious driver that
>> doesn't set the vring address before starting to use the device. Fix
>> this by destroying the region cache during reset and validating it
>> before trying to use it. While at it, also validate the return value
>> of address_space_cache_init() in virtio_init_region_cache() to make
>> sure we have a correct region cache.
>>
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>> hw/virtio/virtio.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++--------
>> 1 file changed, 76 insertions(+), 12 deletions(-)
>>
>> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
>> index 09f4cf4..90324f6 100644
>> --- a/hw/virtio/virtio.c
>> +++ b/hw/virtio/virtio.c
>> @@ -131,6 +131,7 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
>> VRingMemoryRegionCaches *new;
>> hwaddr addr, size;
>> int event_size;
>> + int64_t len;
>>
>> event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
>>
>> @@ -140,21 +141,41 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
>> }
>> new = g_new0(VRingMemoryRegionCaches, 1);
>> size = virtio_queue_get_desc_size(vdev, n);
>> - address_space_cache_init(&new->desc, vdev->dma_as,
>> - addr, size, false);
>> + len = address_space_cache_init(&new->desc, vdev->dma_as,
>> + addr, size, false);
>> + if (len < size) {
>> + virtio_error(vdev, "Cannot map desc");
>> + goto err_desc;
>> + }
>>
>> size = virtio_queue_get_used_size(vdev, n) + event_size;
>> - address_space_cache_init(&new->used, vdev->dma_as,
>> - vq->vring.used, size, true);
>> + len = address_space_cache_init(&new->used, vdev->dma_as,
>> + vq->vring.used, size, true);
>> + if (len < size) {
>> + virtio_error(vdev, "Cannot map used");
>> + goto err_used;
>> + }
>>
>> size = virtio_queue_get_avail_size(vdev, n) + event_size;
>> - address_space_cache_init(&new->avail, vdev->dma_as,
>> - vq->vring.avail, size, false);
>> + len = address_space_cache_init(&new->avail, vdev->dma_as,
>> + vq->vring.avail, size, false);
>> + if (len < size) {
>> + virtio_error(vdev, "Cannot map avail");
>> + goto err_avail;
>> + }
>>
>> atomic_rcu_set(&vq->vring.caches, new);
>> if (old) {
>> call_rcu(old, virtio_free_region_cache, rcu);
>> }
>> + return;
>> +
>> +err_avail:
>> + address_space_cache_destroy(&new->used);
>> +err_used:
>> + address_space_cache_destroy(&new->desc);
>> +err_desc:
>> + g_free(new);
>> }
> I think it would be more readable if you moved adding this check (which
> is a good idea) into a separate patch.
Ok.
>> /* virt queue functions */
>> @@ -190,6 +211,10 @@ static inline uint16_t vring_avail_flags(VirtQueue *vq)
>> {
>> VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
>> hwaddr pa = offsetof(VRingAvail, flags);
>> + if (!caches) {
>> + virtio_error(vq->vdev, "Cannot map avail flags");
> I'm not sure that virtio_error is the right thing here; ending up in
> this function with !caches indicates an error in our logic.
Probably not; this can be triggered by a buggy guest.
> An assert
> might be better (and I hope we can sort out all of those errors exposed
> by the introduction of region caches for 2.9...)
I think we should avoid asserts as much as possible in this case. But
if you and the maintainer prefer an assert, that's also fine.
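For comparison, the assert variant for one of the accessors would just
be something like this (untested sketch):

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
    hwaddr pa = offsetof(VRingAvail, flags);

    /* A driver that uses the queue before setting the vring address
     * would now abort the whole process instead of just marking the
     * device broken via virtio_error(). */
    assert(caches);
    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}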
>
>> + return 0;
>> + }
>> return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
>> }
>>
>> @@ -198,6 +223,10 @@ static inline uint16_t vring_avail_idx(VirtQueue *vq)
>> {
>> VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
>> hwaddr pa = offsetof(VRingAvail, idx);
>> + if (!caches) {
>> + virtio_error(vq->vdev, "Cannot map avail idx");
>> + return vq->shadow_avail_idx;
>> + }
>> vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
>> return vq->shadow_avail_idx;
>> }
>> @@ -207,6 +236,10 @@ static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
>> {
>> VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
>> hwaddr pa = offsetof(VRingAvail, ring[i]);
>> + if (!caches) {
>> + virtio_error(vq->vdev, "Cannot map avail ring");
>> + return 0;
>> + }
>> return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
>> }
>>
>> @@ -222,6 +255,10 @@ static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
>> {
>> VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
>> hwaddr pa = offsetof(VRingUsed, ring[i]);
>> + if (!caches) {
>> + virtio_error(vq->vdev, "Cannot map used ring");
>> + return;
>> + }
>> virtio_tswap32s(vq->vdev, &uelem->id);
>> virtio_tswap32s(vq->vdev, &uelem->len);
>> address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
>> @@ -233,6 +270,10 @@ static uint16_t vring_used_idx(VirtQueue *vq)
>> {
>> VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
>> hwaddr pa = offsetof(VRingUsed, idx);
>> + if (!caches) {
>> + virtio_error(vq->vdev, "Cannot map used ring");
>> + return 0;
>> + }
>> return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
>> }
>>
>> @@ -241,6 +282,10 @@ static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
>> {
>> VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
>> hwaddr pa = offsetof(VRingUsed, idx);
>> + if (!caches) {
>> + virtio_error(vq->vdev, "Cannot map used idx");
>> + return;
>> + }
>> virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
>> address_space_cache_invalidate(&caches->used, pa, sizeof(val));
>> vq->used_idx = val;
>> @@ -254,6 +299,10 @@ static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
>> hwaddr pa = offsetof(VRingUsed, flags);
>> uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
>>
>> + if (!caches) {
>> + virtio_error(vq->vdev, "Cannot map used flags");
> Regardless of whether using virtio_error here is fine: caches was
> already dereferenced above...
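Right, the load has to move below the check. Something like this
(untested sketch):

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        virtio_error(vq->vdev, "Cannot map used flags");
        return;
    }
    /* Only dereference caches after the NULL check. */
    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}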
>
>> + return;
>> + }
>> virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
>> address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
>> }
>> @@ -266,6 +315,10 @@ static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
>> hwaddr pa = offsetof(VRingUsed, flags);
>> uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
>>
>> + if (!caches) {
>> + virtio_error(vq->vdev, "Cannot map used flags");
> ditto
>
>> + return;
>> + }
>> virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
>> address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
>> }
>> @@ -280,6 +333,10 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
>> }
>>
>> caches = atomic_rcu_read(&vq->vring.caches);
>> + if (!caches) {
>> + virtio_error(vq->vdev, "Cannot map avail event");
>> + return;
>> + }
>> pa = offsetof(VRingUsed, ring[vq->vring.num]);
>> virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
>> address_space_cache_invalidate(&caches->used, pa, sizeof(val));
>> @@ -552,7 +609,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
>>
>> max = vq->vring.num;
>> caches = atomic_rcu_read(&vq->vring.caches);
>> - if (caches->desc.len < max * sizeof(VRingDesc)) {
>> + if (!caches || caches->desc.len < max * sizeof(VRingDesc)) {
>> virtio_error(vdev, "Cannot map descriptor ring");
>> goto err;
>> }
>> @@ -819,7 +876,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
>> i = head;
>>
>> caches = atomic_rcu_read(&vq->vring.caches);
>> - if (caches->desc.len < max * sizeof(VRingDesc)) {
>> + if (!caches || caches->desc.len < max * sizeof(VRingDesc)) {
>> virtio_error(vdev, "Cannot map descriptor ring");
>> goto done;
>> }
>> @@ -1117,6 +1174,15 @@ static enum virtio_device_endian virtio_current_cpu_endian(void)
>> }
>> }
>>
>> +static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
>> +{
>> + VRingMemoryRegionCaches *caches;
>> +
>> + caches = atomic_read(&vq->vring.caches);
>> + atomic_set(&vq->vring.caches, NULL);
>> + virtio_free_region_cache(caches);
> Shouldn't this use rcu to free it? Unconditionally setting caches to
> NULL feels wrong...
Right, will switch to using RCU.
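Something like this (untested sketch, mirroring what
virtio_init_region_cache() already does when replacing old caches):

static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = atomic_read(&vq->vring.caches);
    atomic_set(&vq->vring.caches, NULL);
    if (caches) {
        /* Readers may still hold a reference; free after a grace
         * period instead of immediately. */
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}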
>> +}
>> +
>> void virtio_reset(void *opaque)
>> {
>> VirtIODevice *vdev = opaque;
>> @@ -1157,6 +1223,7 @@ void virtio_reset(void *opaque)
>> vdev->vq[i].notification = true;
>> vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
>> vdev->vq[i].inuse = 0;
>> + virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
> ...especially as you call it in a reset context here.
>
>> }
>> }
>>
>> @@ -2451,13 +2518,10 @@ static void virtio_device_free_virtqueues(VirtIODevice *vdev)
>> }
>>
>> for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
>> - VRingMemoryRegionCaches *caches;
>> if (vdev->vq[i].vring.num == 0) {
>> break;
>> }
>> - caches = atomic_read(&vdev->vq[i].vring.caches);
>> - atomic_set(&vdev->vq[i].vring.caches, NULL);
>> - virtio_free_region_cache(caches);
>> + virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
> OTOH, immediate destruction may still be called for during device
> finalization.
>
Right, but to avoid code duplication, using RCU unconditionally should
do no harm here: the callback only frees the standalone caches
structure, so deferring the free is safe during finalization as well.

Thanks
>> }
>> g_free(vdev->vq);
>> }