From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from [140.186.70.92] (port=51012 helo=eggs.gnu.org) by lists.gnu.org with esmtp (Exim 4.43) id 1PLHni-0000Fk-Qo for qemu-devel@nongnu.org; Wed, 24 Nov 2010 11:02:38 -0500 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1PLHeq-0004jY-6l for qemu-devel@nongnu.org; Wed, 24 Nov 2010 10:55:35 -0500 Received: from mx1.redhat.com ([209.132.183.28]:15312) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1PLHep-0004jL-UK for qemu-devel@nongnu.org; Wed, 24 Nov 2010 10:53:20 -0500 Date: Wed, 24 Nov 2010 17:53:05 +0200 From: "Michael S. Tsirkin" Message-ID: References: MIME-Version: 1.0 Content-Type: text/plain; charset=us-ascii Content-Disposition: inline In-Reply-To: Subject: [Qemu-devel] [PATCHv2 4/6] virtio-net: stop/start bh on vm start/stop List-Id: qemu-devel.nongnu.org List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: jasowang@redhat.com, Anthony Liguori , qemu-devel@nongnu.org, quintela@redhat.com Avoid sending out packets, and modifying device state, when VM is stopped. Add a bunch of assert statements to verify this does not happen. Signed-off-by: Michael S. 
Tsirkin Tested-by: Jason Wang --- hw/virtio-net.c | 36 ++++++++++++++++++++++++++---------- 1 files changed, 26 insertions(+), 10 deletions(-) diff --git a/hw/virtio-net.c b/hw/virtio-net.c index 43a2b3d..366a801 100644 --- a/hw/virtio-net.c +++ b/hw/virtio-net.c @@ -675,11 +675,12 @@ static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq) { VirtQueueElement elem; int32_t num_packets = 0; - if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) { return num_packets; } + assert(n->vm_running); + if (n->async_tx.elem.out_num) { virtio_queue_set_notification(n->tx_vq, 0); return num_packets; @@ -737,6 +738,7 @@ static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq) static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq) { VirtIONet *n = to_virtio_net(vdev); + assert(n->vm_running); if (n->tx_waiting) { virtio_queue_set_notification(vq, 1); @@ -754,6 +756,7 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq) static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq) { VirtIONet *n = to_virtio_net(vdev); + assert(n->vm_running); if (unlikely(n->tx_waiting)) { return; @@ -766,6 +769,7 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq) static void virtio_net_tx_timer(void *opaque) { VirtIONet *n = opaque; + assert(n->vm_running); n->tx_waiting = 0; @@ -781,6 +785,9 @@ static void virtio_net_tx_bh(void *opaque) { VirtIONet *n = opaque; int32_t ret; + if (!n->vm_running) { + return; + } n->tx_waiting = 0; @@ -926,15 +933,6 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id) } } n->mac_table.first_multi = i; - - if (n->tx_waiting) { - if (n->tx_timer) { - qemu_mod_timer(n->tx_timer, - qemu_get_clock(vm_clock) + n->tx_timeout); - } else { - qemu_bh_schedule(n->tx_bh); - } - } return 0; } @@ -962,6 +960,24 @@ static void virtio_net_vmstate_change(void *opaque, int running, int reason) * it will start/stop vhost backend if appropriate * e.g. after migration. 
*/ virtio_net_set_status(&n->vdev, n->vdev.status); + + if (!n->tx_waiting) { + return; + } + if (running) { + if (n->tx_timer) { + qemu_mod_timer(n->tx_timer, + qemu_get_clock(vm_clock) + n->tx_timeout); + } else { + qemu_bh_schedule(n->tx_bh); + } + } else { + if (n->tx_timer) { + qemu_del_timer(n->tx_timer); + } else { + qemu_bh_cancel(n->tx_bh); + } + } } VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf, -- 1.7.3.2.91.g446ac