* [PATCH 0/2] libvhost-user: return after vu_panic()
@ 2020-09-21 11:34 Stefan Hajnoczi
2020-09-21 11:34 ` [PATCH 1/2] libvhost-user: return early on virtqueue errors Stefan Hajnoczi
2020-09-21 11:34 ` [PATCH 2/2] libvhost-user: return on error in vu_log_queue_fill() Stefan Hajnoczi
From: Stefan Hajnoczi @ 2020-09-21 11:34 UTC
To: qemu-devel; +Cc: Marc-André Lureau, Stefan Hajnoczi, Michael S. Tsirkin
vu_panic() is not guaranteed to exit the program. Return errors instead.
Most of the code already does this, but I spotted some cases that weren't
handled yet.
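As a quick illustration of the failure mode (the panic callback below is
hypothetical application code, not part of this series): libvhost-user only
invokes whatever callback was registered with vu_init(), so if that callback
merely logs the error, control comes back to the library and the caller of
vu_panic() must bail out on its own.

    #include <stdio.h>
    #include "libvhost-user.h"

    /* Hypothetical panic callback passed to vu_init(); it does not call
     * exit() or abort(), so vu_panic() returns to its caller. */
    static void my_panic_cb(VuDev *dev, const char *msg)
    {
        (void)dev;
        fprintf(stderr, "vhost-user device error: %s\n", msg);
    }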
Stefan Hajnoczi (2):
libvhost-user: return early on virtqueue errors
libvhost-user: return on error in vu_log_queue_fill()
contrib/libvhost-user/libvhost-user.c | 28 +++++++++++++++++----------
1 file changed, 18 insertions(+), 10 deletions(-)
--
2.26.2
* [PATCH 1/2] libvhost-user: return early on virtqueue errors
2020-09-21 11:34 [PATCH 0/2] libvhost-user: return after vu_panic() Stefan Hajnoczi
@ 2020-09-21 11:34 ` Stefan Hajnoczi
2020-09-21 14:26 ` Philippe Mathieu-Daudé
2020-09-21 11:34 ` [PATCH 2/2] libvhost-user: return on error in vu_log_queue_fill() Stefan Hajnoczi
From: Stefan Hajnoczi @ 2020-09-21 11:34 UTC
To: qemu-devel; +Cc: Marc-André Lureau, Stefan Hajnoczi, Michael S. Tsirkin
vu_panic() is not guaranteed to exit the program. Return early when
errors are encountered.
Note that libvhost-user does not have an "unmap" operation for mapped
descriptors. Therefore it is correct to return without explicit cleanup.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
contrib/libvhost-user/libvhost-user.c | 27 +++++++++++++++++----------
1 file changed, 17 insertions(+), 10 deletions(-)
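For context, a sketch of how the new NULL return surfaces to a device
backend (the handler and its registration are hypothetical; the
vu_get_queue()/vu_queue_pop()/vu_queue_push() calls are the existing
libvhost-user API). With this change, errors detected while mapping a
request make vu_queue_pop() return NULL, just as it does for an empty
ring, so a simple loop covers both cases:

    #include <stdlib.h>
    #include "libvhost-user.h"

    /* Hypothetical queue handler registered with vu_set_queue_handler() */
    static void my_queue_handler(VuDev *dev, int qidx)
    {
        VuVirtq *vq = vu_get_queue(dev, qidx);
        VuVirtqElement *elem;

        while ((elem = vu_queue_pop(dev, vq, sizeof(*elem))) != NULL) {
            /* ... process elem->out_sg / elem->in_sg here ... */
            vu_queue_push(dev, vq, elem, 0);
            free(elem);   /* elements from vu_queue_pop() are malloc'd */
        }
        vu_queue_notify(dev, vq);
    }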
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 53f16bdf08..27626e629a 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -2407,7 +2407,7 @@ vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
}
}
-static void
+static bool
virtqueue_map_desc(VuDev *dev,
unsigned int *p_num_sg, struct iovec *iov,
unsigned int max_num_sg, bool is_write,
@@ -2419,7 +2419,7 @@ virtqueue_map_desc(VuDev *dev,
if (!sz) {
vu_panic(dev, "virtio: zero sized buffers are not allowed");
- return;
+ return false;
}
while (sz) {
@@ -2427,13 +2427,13 @@ virtqueue_map_desc(VuDev *dev,
if (num_sg == max_num_sg) {
vu_panic(dev, "virtio: too many descriptors in indirect table");
- return;
+ return false;
}
iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
if (iov[num_sg].iov_base == NULL) {
vu_panic(dev, "virtio: invalid address for buffers");
- return;
+ return false;
}
iov[num_sg].iov_len = len;
num_sg++;
@@ -2442,6 +2442,7 @@ virtqueue_map_desc(VuDev *dev,
}
*p_num_sg = num_sg;
+ return true;
}
static void *
@@ -2479,6 +2480,7 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
if (desc[i].flags & VRING_DESC_F_INDIRECT) {
if (desc[i].len % sizeof(struct vring_desc)) {
vu_panic(dev, "Invalid size for indirect buffer table");
+ return NULL;
}
/* loop over the indirect descriptor table */
@@ -2506,22 +2508,27 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
/* Collect all the descriptors */
do {
if (desc[i].flags & VRING_DESC_F_WRITE) {
- virtqueue_map_desc(dev, &in_num, iov + out_num,
- VIRTQUEUE_MAX_SIZE - out_num, true,
- desc[i].addr, desc[i].len);
+ if (!virtqueue_map_desc(dev, &in_num, iov + out_num,
+ VIRTQUEUE_MAX_SIZE - out_num, true,
+ desc[i].addr, desc[i].len)) {
+ return NULL;
+ }
} else {
if (in_num) {
vu_panic(dev, "Incorrect order for descriptors");
return NULL;
}
- virtqueue_map_desc(dev, &out_num, iov,
- VIRTQUEUE_MAX_SIZE, false,
- desc[i].addr, desc[i].len);
+ if (!virtqueue_map_desc(dev, &out_num, iov,
+ VIRTQUEUE_MAX_SIZE, false,
+ desc[i].addr, desc[i].len)) {
+ return NULL;
+ }
}
/* If we've got too many, that implies a descriptor loop. */
if ((in_num + out_num) > max) {
vu_panic(dev, "Looped descriptor");
+ return NULL;
}
rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
} while (rc == VIRTQUEUE_READ_DESC_MORE);
--
2.26.2
* [PATCH 2/2] libvhost-user: return on error in vu_log_queue_fill()
2020-09-21 11:34 [PATCH 0/2] libvhost-user: return after vu_panic() Stefan Hajnoczi
2020-09-21 11:34 ` [PATCH 1/2] libvhost-user: return early on virtqueue errors Stefan Hajnoczi
@ 2020-09-21 11:34 ` Stefan Hajnoczi
2020-09-21 14:25 ` Philippe Mathieu-Daudé
From: Stefan Hajnoczi @ 2020-09-21 11:34 UTC
To: qemu-devel; +Cc: Marc-André Lureau, Stefan Hajnoczi, Michael S. Tsirkin
vu_panic() is not guaranteed to exit the program. Return early when
errors are encountered.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
contrib/libvhost-user/libvhost-user.c | 1 +
1 file changed, 1 insertion(+)
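For reference, the layout being validated (a sketch of the standard virtio
split-ring descriptor; libvhost-user takes the real definition from the
virtio_ring.h standard header, shown here with fixed-width stand-ins for its
types). An indirect descriptor's buffer is itself an array of these 16-byte
entries, so a length that is not a multiple of sizeof(struct vring_desc)
cannot describe a valid table, and falling through after the vu_panic()
would size the indirect table from a malformed length:

    #include <stdint.h>

    /* Sketch of the virtio split-ring descriptor layout */
    struct vring_desc {
        uint64_t addr;   /* guest-physical address of the buffer */
        uint32_t len;    /* buffer length in bytes */
        uint16_t flags;  /* VRING_DESC_F_NEXT / _WRITE / _INDIRECT */
        uint16_t next;   /* index of the next descriptor in the chain */
    };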
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 27626e629a..1fc2cb12ce 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -2722,6 +2722,7 @@ vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
if (desc[i].flags & VRING_DESC_F_INDIRECT) {
if (desc[i].len % sizeof(struct vring_desc)) {
vu_panic(dev, "Invalid size for indirect buffer table");
+ return;
}
/* loop over the indirect descriptor table */
--
2.26.2
* Re: [PATCH 2/2] libvhost-user: return on error in vu_log_queue_fill()
2020-09-21 11:34 ` [PATCH 2/2] libvhost-user: return on error in vu_log_queue_fill() Stefan Hajnoczi
@ 2020-09-21 14:25 ` Philippe Mathieu-Daudé
From: Philippe Mathieu-Daudé @ 2020-09-21 14:25 UTC
To: Stefan Hajnoczi, qemu-devel; +Cc: Marc-André Lureau, Michael S. Tsirkin
On 9/21/20 1:34 PM, Stefan Hajnoczi wrote:
> vu_panic() is not guaranteed to exit the program. Return early when
> errors are encountered.
>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> contrib/libvhost-user/libvhost-user.c | 1 +
> 1 file changed, 1 insertion(+)
>
> diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
> index 27626e629a..1fc2cb12ce 100644
> --- a/contrib/libvhost-user/libvhost-user.c
> +++ b/contrib/libvhost-user/libvhost-user.c
> @@ -2722,6 +2722,7 @@ vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
> if (desc[i].flags & VRING_DESC_F_INDIRECT) {
> if (desc[i].len % sizeof(struct vring_desc)) {
> vu_panic(dev, "Invalid size for indirect buffer table");
> + return;
> }
>
> /* loop over the indirect descriptor table */
>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
* Re: [PATCH 1/2] libvhost-user: return early on virtqueue errors
2020-09-21 11:34 ` [PATCH 1/2] libvhost-user: return early on virtqueue errors Stefan Hajnoczi
@ 2020-09-21 14:26 ` Philippe Mathieu-Daudé
From: Philippe Mathieu-Daudé @ 2020-09-21 14:26 UTC
To: Stefan Hajnoczi, qemu-devel; +Cc: Marc-André Lureau, Michael S. Tsirkin
On 9/21/20 1:34 PM, Stefan Hajnoczi wrote:
> vu_panic() is not guaranteed to exit the program. Return early when
> errors are encountered.
>
> Note that libvhost-user does not have an "unmap" operation for mapped
> descriptors. Therefore it is correct to return without explicit cleanup.
>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> contrib/libvhost-user/libvhost-user.c | 27 +++++++++++++++++----------
> 1 file changed, 17 insertions(+), 10 deletions(-)
>
> diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
> index 53f16bdf08..27626e629a 100644
> --- a/contrib/libvhost-user/libvhost-user.c
> +++ b/contrib/libvhost-user/libvhost-user.c
> @@ -2407,7 +2407,7 @@ vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
> }
> }
>
> -static void
> +static bool
> virtqueue_map_desc(VuDev *dev,
> unsigned int *p_num_sg, struct iovec *iov,
> unsigned int max_num_sg, bool is_write,
> @@ -2419,7 +2419,7 @@ virtqueue_map_desc(VuDev *dev,
>
> if (!sz) {
> vu_panic(dev, "virtio: zero sized buffers are not allowed");
> - return;
> + return false;
> }
>
> while (sz) {
> @@ -2427,13 +2427,13 @@ virtqueue_map_desc(VuDev *dev,
>
> if (num_sg == max_num_sg) {
> vu_panic(dev, "virtio: too many descriptors in indirect table");
> - return;
> + return false;
> }
>
> iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
> if (iov[num_sg].iov_base == NULL) {
> vu_panic(dev, "virtio: invalid address for buffers");
> - return;
> + return false;
> }
> iov[num_sg].iov_len = len;
> num_sg++;
> @@ -2442,6 +2442,7 @@ virtqueue_map_desc(VuDev *dev,
> }
>
> *p_num_sg = num_sg;
> + return true;
> }
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
>
> static void *
> @@ -2479,6 +2480,7 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
> if (desc[i].flags & VRING_DESC_F_INDIRECT) {
> if (desc[i].len % sizeof(struct vring_desc)) {
> vu_panic(dev, "Invalid size for indirect buffer table");
> + return NULL;
> }
>
> /* loop over the indirect descriptor table */
> @@ -2506,22 +2508,27 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
> /* Collect all the descriptors */
> do {
> if (desc[i].flags & VRING_DESC_F_WRITE) {
> - virtqueue_map_desc(dev, &in_num, iov + out_num,
> - VIRTQUEUE_MAX_SIZE - out_num, true,
> - desc[i].addr, desc[i].len);
> + if (!virtqueue_map_desc(dev, &in_num, iov + out_num,
> + VIRTQUEUE_MAX_SIZE - out_num, true,
> + desc[i].addr, desc[i].len)) {
> + return NULL;
> + }
> } else {
> if (in_num) {
> vu_panic(dev, "Incorrect order for descriptors");
> return NULL;
> }
> - virtqueue_map_desc(dev, &out_num, iov,
> - VIRTQUEUE_MAX_SIZE, false,
> - desc[i].addr, desc[i].len);
> + if (!virtqueue_map_desc(dev, &out_num, iov,
> + VIRTQUEUE_MAX_SIZE, false,
> + desc[i].addr, desc[i].len)) {
> + return NULL;
> + }
> }
>
> /* If we've got too many, that implies a descriptor loop. */
> if ((in_num + out_num) > max) {
> vu_panic(dev, "Looped descriptor");
> + return NULL;
> }
> rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
> } while (rc == VIRTQUEUE_READ_DESC_MORE);
>