Date: Mon, 29 Nov 2010 17:37:18 +0200
From: "Michael S. Tsirkin"
To: jasowang@redhat.com, Anthony Liguori, qemu-devel@nongnu.org, quintela@redhat.com
Message-ID: <20101129153718.GA2494@redhat.com>
Subject: [Qemu-devel] [PATCHv3 4/6] virtio-net: stop/start bh when appropriate

Avoid sending out packets and modifying device state when the VM is
stopped.  Add assert statements to verify that this does not happen.

Avoid scheduling the bh when vhost-net is started, since vhost then
handles tx itself.

Stop the bh when the driver has disabled bus mastering (we must not
access memory after this).

Signed-off-by: Michael S. Tsirkin
---

Respinning just this one patch:
    virtio-net: stop/start bh on vm start/stop

It turns out that under kvm a vcpu can still be running after the vm
stop notifier callbacks are done.  And it makes sense: the vcpu is just
another device, so we should not depend on the order of vmstate
notifiers.  The state callback is thus a request for the device to
avoid changing the state of other devices, not a guarantee that other
devices will not send requests to it.
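To illustrate the defer-while-stopped pattern the patch uses, here is a
minimal self-contained sketch (illustrative only, not QEMU code; all
names below are made up): the kick handler records pending work and
changes nothing else while the VM is stopped, and the start path
replays it later.

/* Minimal sketch of the defer-while-stopped pattern (illustrative). */
#include <assert.h>
#include <stdbool.h>

struct dev {
    bool vm_running;  /* maintained by a vm state notifier */
    bool tx_waiting;  /* a kick arrived while we could not run */
};

/* Guest kick: may still be invoked after vm stop notifiers ran. */
static void handle_kick(struct dev *d)
{
    d->tx_waiting = true;
    if (!d->vm_running) {
        return;  /* remember the kick, touch nothing else */
    }
    /* ... schedule the bottom half here ... */
}

/* VM (re)start: replay any deferred kick. */
static void vm_started(struct dev *d)
{
    d->vm_running = true;
    if (d->tx_waiting) {
        /* ... schedule the bottom half here ... */
    }
}

/* The bottom half only runs while the VM runs, so it may assert. */
static void tx_bh(struct dev *d)
{
    assert(d->vm_running);
    d->tx_waiting = false;
    /* ... flush the tx queue ... */
}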
diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 43a2b3d..260433f 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -99,7 +99,13 @@ static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
     }
 }
 
-static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
+bool virtio_net_started(VirtIONet *n, uint8_t status)
+{
+    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+        (n->status & VIRTIO_NET_S_LINK_UP) && n->vm_running;
+}
+
+static void virtio_net_vhost_status(struct VirtIODevice *vdev, uint8_t status)
 {
     VirtIONet *n = to_virtio_net(vdev);
 
@@ -112,9 +118,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
     if (!tap_get_vhost_net(n->nic->nc.peer)) {
         return;
     }
-    if (!!n->vhost_started == ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
-                               (n->status & VIRTIO_NET_S_LINK_UP) &&
-                               n->vm_running)) {
+    if (!!n->vhost_started == virtio_net_started(n, status)) {
         return;
     }
     if (!n->vhost_started) {
@@ -131,6 +135,32 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
     }
 }
 
+static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
+{
+    VirtIONet *n = to_virtio_net(vdev);
+
+    virtio_net_vhost_status(vdev, status);
+
+    if (!n->tx_waiting) {
+        return;
+    }
+
+    if (virtio_net_started(n, status) && !n->vhost_started) {
+        if (n->tx_timer) {
+            qemu_mod_timer(n->tx_timer,
+                           qemu_get_clock(vm_clock) + n->tx_timeout);
+        } else {
+            qemu_bh_schedule(n->tx_bh);
+        }
+    } else {
+        if (n->tx_timer) {
+            qemu_del_timer(n->tx_timer);
+        } else {
+            qemu_bh_cancel(n->tx_bh);
+        }
+    }
+}
+
 static void virtio_net_set_link_status(VLANClientState *nc)
 {
     VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -675,11 +705,13 @@ static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
 {
     VirtQueueElement elem;
     int32_t num_packets = 0;
 
     if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
         return num_packets;
     }
 
+    assert(n->vm_running);
+
     if (n->async_tx.elem.out_num) {
         virtio_queue_set_notification(n->tx_vq, 0);
         return num_packets;
@@ -738,6 +769,12 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIONet *n = to_virtio_net(vdev);
 
+    /* This happens when device was stopped but VCPU wasn't. */
+    if (!n->vm_running) {
+        n->tx_waiting = 1;
+        return;
+    }
+
     if (n->tx_waiting) {
         virtio_queue_set_notification(vq, 1);
         qemu_del_timer(n->tx_timer);
@@ -758,14 +795,19 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
     if (unlikely(n->tx_waiting)) {
         return;
     }
 
+    n->tx_waiting = 1;
+    /* This happens when device was stopped but VCPU wasn't. */
+    if (!n->vm_running) {
+        return;
+    }
     virtio_queue_set_notification(vq, 0);
     qemu_bh_schedule(n->tx_bh);
-    n->tx_waiting = 1;
 }
 
 static void virtio_net_tx_timer(void *opaque)
 {
     VirtIONet *n = opaque;
+    assert(n->vm_running);
 
     n->tx_waiting = 0;
@@ -782,6 +824,8 @@ static void virtio_net_tx_bh(void *opaque)
     VirtIONet *n = opaque;
     int32_t ret;
 
+    assert(n->vm_running);
+
     n->tx_waiting = 0;
 
     /* Just in case the driver is not ready on more */
@@ -926,15 +970,6 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
         }
     }
     n->mac_table.first_multi = i;
-
-    if (n->tx_waiting) {
-        if (n->tx_timer) {
-            qemu_mod_timer(n->tx_timer,
-                           qemu_get_clock(vm_clock) + n->tx_timeout);
-        } else {
-            qemu_bh_schedule(n->tx_bh);
-        }
-    }
     return 0;
 }
 
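For reference, n->vm_running is maintained from a VM state change
notifier added earlier in this series.  A minimal sketch of that
wiring, assuming the qemu_add_vm_change_state_handler() API of this
period (the handler name and hookup point here are illustrative, not
necessarily what the earlier patches use):

static void virtio_net_vmstate_change(void *opaque, int running,
                                      int reason)
{
    VirtIONet *n = opaque;

    n->vm_running = running;
    /* Re-run the status callback so vhost and the tx bh/timer are
     * started or stopped to match the new vm state. */
    virtio_net_set_status(&n->vdev, n->vdev.status);
}

/* at device init time: */
qemu_add_vm_change_state_handler(virtio_net_vmstate_change, n);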