qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Jason Wang <jasowang@redhat.com>
To: Wen Congyang <wency@cn.fujitsu.com>,
	qemu-devel <qemu-devel@nongnu.org>,
	"Michael S. Tsirkin" <mst@redhat.com>
Cc: Fam Zheng <famz@redhat.com>, Stefan Hajnoczi <stefanha@redhat.com>
Subject: Re: [Qemu-devel] [PATCH v3 for-2.4] virtio-net: remove virtio queues if the guest doesn't support multiqueue
Date: Thu, 16 Jul 2015 10:47:31 +0800	[thread overview]
Message-ID: <55A71B43.2070807@redhat.com> (raw)
In-Reply-To: <55A625FB.5030808@cn.fujitsu.com>



On 07/15/2015 05:20 PM, Wen Congyang wrote:
> commit da51a335 adds all queues in .realize(). But if the
> guest doesn't support multiqueue, we forget to remove them. And
> we cannot handle the ctrl vq correctly. The guest will hang.
>
> Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
> ---

Acked-by: Jason Wang <jasowang@redhat.com>

>  hw/net/virtio-net.c | 110 +++++++++++++++++++++++++++++++++++++++-------------
>  1 file changed, 82 insertions(+), 28 deletions(-)
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index e3c2db3..f406954 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -1306,9 +1306,86 @@ static void virtio_net_tx_bh(void *opaque)
>      }
>  }
>  
> +static void virtio_net_add_queue(VirtIONet *n, int index)
> +{
> +    VirtIODevice *vdev = VIRTIO_DEVICE(n);
> +
> +    n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
> +    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
> +        n->vqs[index].tx_vq =
> +            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
> +        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
> +                                              virtio_net_tx_timer,
> +                                              &n->vqs[index]);
> +    } else {
> +        n->vqs[index].tx_vq =
> +            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
> +        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
> +    }
> +
> +    n->vqs[index].tx_waiting = 0;
> +    n->vqs[index].n = n;
> +}
> +
> +static void virtio_net_del_queue(VirtIONet *n, int index)
> +{
> +    VirtIODevice *vdev = VIRTIO_DEVICE(n);
> +    VirtIONetQueue *q = &n->vqs[index];
> +    NetClientState *nc = qemu_get_subqueue(n->nic, index);
> +
> +    qemu_purge_queued_packets(nc);
> +
> +    virtio_del_queue(vdev, index * 2);
> +    if (q->tx_timer) {
> +        timer_del(q->tx_timer);
> +        timer_free(q->tx_timer);
> +    } else {
> +        qemu_bh_delete(q->tx_bh);
> +    }
> +    virtio_del_queue(vdev, index * 2 + 1);
> +}
> +
> +static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
> +{
> +    VirtIODevice *vdev = VIRTIO_DEVICE(n);
> +    int old_num_queues = virtio_get_num_queues(vdev);
> +    int new_num_queues = new_max_queues * 2 + 1;
> +    int i;
> +
> +    assert(old_num_queues >= 3);
> +    assert(old_num_queues % 2 == 1);
> +
> +    if (old_num_queues == new_num_queues) {
> +        return;
> +    }
> +
> +    /*
> +     * We always need to remove and add ctrl vq if
> +     * old_num_queues != new_num_queues. Remove ctrl_vq first,
> +     * and then we only enter one of the following two loops.
> +     */
> +    virtio_del_queue(vdev, old_num_queues - 1);
> +
> +    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
> +        /* new_num_queues < old_num_queues */
> +        virtio_net_del_queue(n, i / 2);
> +    }
> +
> +    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
> +        /* new_num_queues > old_num_queues */
> +        virtio_net_add_queue(n, i / 2);
> +    }
> +
> +    /* add ctrl_vq last */
> +    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
> +}
> +
>  static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
>  {
> +    int max = multiqueue ? n->max_queues : 1;
> +
>      n->multiqueue = multiqueue;
> +    virtio_net_change_num_queues(n, max);
>  
>      virtio_net_set_queues(n);
>  }
> @@ -1583,21 +1660,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>      }
>  
>      for (i = 0; i < n->max_queues; i++) {
> -        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
> -        if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
> -            n->vqs[i].tx_vq =
> -                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
> -            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
> -                                              virtio_net_tx_timer,
> -                                              &n->vqs[i]);
> -        } else {
> -            n->vqs[i].tx_vq =
> -                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
> -            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
> -        }
> -
> -        n->vqs[i].tx_waiting = 0;
> -        n->vqs[i].n = n;
> +        virtio_net_add_queue(n, i);
>      }
>  
>      n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
> @@ -1651,7 +1714,7 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
>  {
>      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
>      VirtIONet *n = VIRTIO_NET(dev);
> -    int i;
> +    int i, max_queues;
>  
>      /* This will stop vhost backend if appropriate. */
>      virtio_net_set_status(vdev, 0);
> @@ -1666,18 +1729,9 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
>      g_free(n->mac_table.macs);
>      g_free(n->vlans);
>  
> -    for (i = 0; i < n->max_queues; i++) {
> -        VirtIONetQueue *q = &n->vqs[i];
> -        NetClientState *nc = qemu_get_subqueue(n->nic, i);
> -
> -        qemu_purge_queued_packets(nc);
> -
> -        if (q->tx_timer) {
> -            timer_del(q->tx_timer);
> -            timer_free(q->tx_timer);
> -        } else if (q->tx_bh) {
> -            qemu_bh_delete(q->tx_bh);
> -        }
> +    max_queues = n->multiqueue ? n->max_queues : 1;
> +    for (i = 0; i < max_queues; i++) {
> +        virtio_net_del_queue(n, i);
>      }
>  
>      timer_del(n->announce_timer);

      reply	other threads:[~2015-07-16  2:47 UTC|newest]

Thread overview: 2+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-07-15  9:20 [Qemu-devel] [PATCH v3 for-2.4] virtio-net: remove virtio queues if the guest doesn't support multiqueue Wen Congyang
2015-07-16  2:47 ` Jason Wang [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=55A71B43.2070807@redhat.com \
    --to=jasowang@redhat.com \
    --cc=famz@redhat.com \
    --cc=mst@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=stefanha@redhat.com \
    --cc=wency@cn.fujitsu.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).