* [PATCH] vdpa: Clean vhost_vdpa_dev_start(dev, false)
@ 2022-07-12 7:15 Eugenio Pérez
2022-07-14 6:45 ` Jason Wang
0 siblings, 1 reply; 2+ messages in thread
From: Eugenio Pérez @ 2022-07-12 7:15 UTC (permalink / raw)
To: qemu-devel; +Cc: Jason Wang, Laurent Vivier, Michael S. Tsirkin, qemu-trivial
The return value is never checked, and this is a cleanup path, so assume success.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
hw/virtio/vhost-vdpa.c | 33 ++++++++++-----------------------
1 file changed, 10 insertions(+), 23 deletions(-)
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 66f054a12c..d6ba4a492a 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -872,41 +872,35 @@ static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
/**
* Unmap a SVQ area in the device
*/
-static bool vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v,
+static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v,
const DMAMap *needle)
{
const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, needle);
hwaddr size;
- int r;
if (unlikely(!result)) {
error_report("Unable to find SVQ address to unmap");
- return false;
+ return;
}
size = ROUND_UP(result->size, qemu_real_host_page_size());
- r = vhost_vdpa_dma_unmap(v, result->iova, size);
- return r == 0;
+ vhost_vdpa_dma_unmap(v, result->iova, size);
}
-static bool vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
+static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
const VhostShadowVirtqueue *svq)
{
DMAMap needle = {};
struct vhost_vdpa *v = dev->opaque;
struct vhost_vring_addr svq_addr;
- bool ok;
vhost_svq_get_vring_addr(svq, &svq_addr);
needle.translated_addr = svq_addr.desc_user_addr;
- ok = vhost_vdpa_svq_unmap_ring(v, &needle);
- if (unlikely(!ok)) {
- return false;
- }
+ vhost_vdpa_svq_unmap_ring(v, &needle);
needle.translated_addr = svq_addr.used_user_addr;
- return vhost_vdpa_svq_unmap_ring(v, &needle);
+ vhost_vdpa_svq_unmap_ring(v, &needle);
}
/**
@@ -1066,23 +1060,19 @@ err:
return false;
}
-static bool vhost_vdpa_svqs_stop(struct vhost_dev *dev)
+static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
{
struct vhost_vdpa *v = dev->opaque;
if (!v->shadow_vqs) {
- return true;
+ return;
}
for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
- bool ok = vhost_vdpa_svq_unmap_rings(dev, svq);
- if (unlikely(!ok)) {
- return false;
- }
+ vhost_vdpa_svq_unmap_rings(dev, svq);
}
- return true;
}
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
@@ -1099,10 +1089,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
}
vhost_vdpa_set_vring_ready(dev);
} else {
- ok = vhost_vdpa_svqs_stop(dev);
- if (unlikely(!ok)) {
- return -1;
- }
+ vhost_vdpa_svqs_stop(dev);
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
}
--
2.31.1
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH] vdpa: Clean vhost_vdpa_dev_start(dev, false)
2022-07-12 7:15 [PATCH] vdpa: Clean vhost_vdpa_dev_start(dev, false) Eugenio Pérez
@ 2022-07-14 6:45 ` Jason Wang
0 siblings, 0 replies; 2+ messages in thread
From: Jason Wang @ 2022-07-14 6:45 UTC (permalink / raw)
To: Eugenio Pérez
Cc: qemu-devel, Laurent Vivier, Michael S. Tsirkin, QEMU Trivial
On Tue, Jul 12, 2022 at 3:15 PM Eugenio Pérez <eperezma@redhat.com> wrote:
>
> The return value is never checked, and this is a cleanup path, so assume success.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
Acked-by: Jason Wang <jasowang@redhat.com>
> hw/virtio/vhost-vdpa.c | 33 ++++++++++-----------------------
> 1 file changed, 10 insertions(+), 23 deletions(-)
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 66f054a12c..d6ba4a492a 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -872,41 +872,35 @@ static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
> /**
> * Unmap a SVQ area in the device
> */
> -static bool vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v,
> +static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v,
> const DMAMap *needle)
> {
> const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, needle);
> hwaddr size;
> - int r;
>
> if (unlikely(!result)) {
> error_report("Unable to find SVQ address to unmap");
> - return false;
> + return;
> }
>
> size = ROUND_UP(result->size, qemu_real_host_page_size());
> - r = vhost_vdpa_dma_unmap(v, result->iova, size);
> - return r == 0;
> + vhost_vdpa_dma_unmap(v, result->iova, size);
> }
>
> -static bool vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
> +static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
> const VhostShadowVirtqueue *svq)
> {
> DMAMap needle = {};
> struct vhost_vdpa *v = dev->opaque;
> struct vhost_vring_addr svq_addr;
> - bool ok;
>
> vhost_svq_get_vring_addr(svq, &svq_addr);
>
> needle.translated_addr = svq_addr.desc_user_addr;
> - ok = vhost_vdpa_svq_unmap_ring(v, &needle);
> - if (unlikely(!ok)) {
> - return false;
> - }
> + vhost_vdpa_svq_unmap_ring(v, &needle);
>
> needle.translated_addr = svq_addr.used_user_addr;
> - return vhost_vdpa_svq_unmap_ring(v, &needle);
> + vhost_vdpa_svq_unmap_ring(v, &needle);
> }
>
> /**
> @@ -1066,23 +1060,19 @@ err:
> return false;
> }
>
> -static bool vhost_vdpa_svqs_stop(struct vhost_dev *dev)
> +static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
> {
> struct vhost_vdpa *v = dev->opaque;
>
> if (!v->shadow_vqs) {
> - return true;
> + return;
> }
>
> for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
> VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
> - bool ok = vhost_vdpa_svq_unmap_rings(dev, svq);
> - if (unlikely(!ok)) {
> - return false;
> - }
> + vhost_vdpa_svq_unmap_rings(dev, svq);
> }
>
> - return true;
> }
>
> static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
> @@ -1099,10 +1089,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
> }
> vhost_vdpa_set_vring_ready(dev);
> } else {
> - ok = vhost_vdpa_svqs_stop(dev);
> - if (unlikely(!ok)) {
> - return -1;
> - }
> + vhost_vdpa_svqs_stop(dev);
> vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
> }
>
> --
> 2.31.1
>
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2022-07-14 6:52 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-07-12 7:15 [PATCH] vdpa: Clean vhost_vdpa_dev_start(dev, false) Eugenio Pérez
2022-07-14 6:45 ` Jason Wang
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).