From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Marc-André Lureau" <marcandre.lureau@redhat.com>,
	"Stefan Hajnoczi" <stefanha@redhat.com>,
	"Michael S . Tsirkin" <mst@redhat.com>
Subject: [PATCH 1/2] libvhost-user: return early on virtqueue errors
Date: Mon, 21 Sep 2020 12:34:19 +0100	[thread overview]
Message-ID: <20200921113420.154378-2-stefanha@redhat.com> (raw)
In-Reply-To: <20200921113420.154378-1-stefanha@redhat.com>

vu_panic() is not guaranteed to exit the program, so execution can
continue past the call. Return early when errors are encountered
instead of falling through to code that assumes success.

Note that libvhost-user has no "unmap" operation for mapped
descriptors, so it is correct to return without explicit cleanup.
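
To illustrate the pattern (a minimal sketch, not code taken from this
patch: example_check() is a hypothetical helper, while vu_panic() and
VuDev are the real libvhost-user names):

    static bool
    example_check(VuDev *dev, size_t sz)
    {
        if (!sz) {
            /* vu_panic() marks the device broken and invokes the
             * application's panic callback, but it is not guaranteed
             * to exit, so bail out explicitly here. */
            vu_panic(dev, "virtio: zero sized buffers are not allowed");
            return false;
        }
        return true;
    }

Callers treat a false return as fatal for the request and propagate
it, e.g. "if (!example_check(dev, sz)) { return NULL; }"; since there
is no "unmap" operation, nothing needs to be undone on this path.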

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 contrib/libvhost-user/libvhost-user.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 53f16bdf08..27626e629a 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -2407,7 +2407,7 @@ vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
     }
 }
 
-static void
+static bool
 virtqueue_map_desc(VuDev *dev,
                    unsigned int *p_num_sg, struct iovec *iov,
                    unsigned int max_num_sg, bool is_write,
@@ -2419,7 +2419,7 @@ virtqueue_map_desc(VuDev *dev,
 
     if (!sz) {
         vu_panic(dev, "virtio: zero sized buffers are not allowed");
-        return;
+        return false;
     }
 
     while (sz) {
@@ -2427,13 +2427,13 @@ virtqueue_map_desc(VuDev *dev,
 
         if (num_sg == max_num_sg) {
             vu_panic(dev, "virtio: too many descriptors in indirect table");
-            return;
+            return false;
         }
 
         iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
         if (iov[num_sg].iov_base == NULL) {
             vu_panic(dev, "virtio: invalid address for buffers");
-            return;
+            return false;
         }
         iov[num_sg].iov_len = len;
         num_sg++;
@@ -2442,6 +2442,7 @@ virtqueue_map_desc(VuDev *dev,
     }
 
     *p_num_sg = num_sg;
+    return true;
 }
 
 static void *
@@ -2479,6 +2480,7 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
     if (desc[i].flags & VRING_DESC_F_INDIRECT) {
         if (desc[i].len % sizeof(struct vring_desc)) {
             vu_panic(dev, "Invalid size for indirect buffer table");
+            return NULL;
         }
 
         /* loop over the indirect descriptor table */
@@ -2506,22 +2508,27 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
     /* Collect all the descriptors */
     do {
         if (desc[i].flags & VRING_DESC_F_WRITE) {
-            virtqueue_map_desc(dev, &in_num, iov + out_num,
-                               VIRTQUEUE_MAX_SIZE - out_num, true,
-                               desc[i].addr, desc[i].len);
+            if (!virtqueue_map_desc(dev, &in_num, iov + out_num,
+                                    VIRTQUEUE_MAX_SIZE - out_num, true,
+                                    desc[i].addr, desc[i].len)) {
+                return NULL;
+            }
         } else {
             if (in_num) {
                 vu_panic(dev, "Incorrect order for descriptors");
                 return NULL;
             }
-            virtqueue_map_desc(dev, &out_num, iov,
-                               VIRTQUEUE_MAX_SIZE, false,
-                               desc[i].addr, desc[i].len);
+            if (!virtqueue_map_desc(dev, &out_num, iov,
+                                    VIRTQUEUE_MAX_SIZE, false,
+                                    desc[i].addr, desc[i].len)) {
+                return NULL;
+            }
         }
 
         /* If we've got too many, that implies a descriptor loop. */
         if ((in_num + out_num) > max) {
             vu_panic(dev, "Looped descriptor");
+            return NULL;
         }
         rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
     } while (rc == VIRTQUEUE_READ_DESC_MORE);
-- 
2.26.2

