From: "Michael S. Tsirkin" <mst@redhat.com>
To: Anthony Liguori <anthony@codemonkey.ws>, qemu-devel@nongnu.org
Cc: amit.shah@redhat.com, kraxel@redhat.com, quintela@redhat.com
Subject: [Qemu-devel] [PATCHv4 12/12] virtio-net: vhost net support
Date: Wed, 3 Mar 2010 19:16:53 +0200
Message-ID: <20b623baa48aca4af8e98e12bfc1b8c6e4360eb5.1267636215.git.mst@redhat.com>
In-Reply-To: <cover.1267636215.git.mst@redhat.com>
This connects virtio-net to the vhost net backend.

The code is structured analogously to the vnet header capability in tap.
We start/stop the backend on driver start/stop, as well as on save and
VM start (for migration).
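In essence the policy is: run the vhost backend exactly when the guest
driver is active. A minimal sketch of that decision, simplified from
virtio_net_set_status() in the diff below (sync_vhost is a hypothetical
helper name, not part of the patch):

    static void sync_vhost(VirtIONet *n, uint8_t status)
    {
        bool driver_ok = !!(status & VIRTIO_CONFIG_S_DRIVER_OK);

        if (!!n->vhost_started == driver_ok) {
            return; /* already in the desired state */
        }
        if (driver_ok) {
            /* Hand the virtqueues to the kernel backend; on failure,
             * keep processing them in userspace virtio. */
            if (vhost_net_start(tap_get_vhost_net(n->nic->nc.peer),
                                &n->vdev) >= 0) {
                n->vhost_started = 1;
            }
        } else {
            vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
            n->vhost_started = 0;
        }
    }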
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
hw/virtio-net.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 69 insertions(+), 2 deletions(-)
diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 5c0093e..9ddd58c 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -17,6 +17,7 @@
#include "net/tap.h"
#include "qemu-timer.h"
#include "virtio-net.h"
+#include "vhost_net.h"
#define VIRTIO_NET_VM_VERSION 11
@@ -47,6 +48,8 @@ typedef struct VirtIONet
uint8_t nomulti;
uint8_t nouni;
uint8_t nobcast;
+ uint8_t vhost_started;
+ VMChangeStateEntry *vmstate;
struct {
int in_use;
int first_multi;
@@ -114,6 +117,10 @@ static void virtio_net_reset(VirtIODevice *vdev)
n->nomulti = 0;
n->nouni = 0;
n->nobcast = 0;
+ if (n->vhost_started) {
+ vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
+ n->vhost_started = 0;
+ }
/* Flush any MAC and VLAN filter table state */
n->mac_table.in_use = 0;
@@ -172,7 +179,14 @@ static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
}
- return features;
+ if (!n->nic->nc.peer ||
+ n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
+ return features;
+ }
+ if (!tap_get_vhost_net(n->nic->nc.peer)) {
+ return features;
+ }
+ return vhost_net_get_features(tap_get_vhost_net(n->nic->nc.peer), features);
}
static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
@@ -698,6 +712,12 @@ static void virtio_net_save(QEMUFile *f, void *opaque)
{
VirtIONet *n = opaque;
+ if (n->vhost_started) {
+ /* TODO: should we really stop the backend?
+ * If we don't, it might keep writing to memory. */
+ vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
+ n->vhost_started = 0;
+ }
virtio_save(&n->vdev, f);
qemu_put_buffer(f, n->mac, ETH_ALEN);
@@ -810,7 +830,6 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
qemu_mod_timer(n->tx_timer,
qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
}
-
return 0;
}
@@ -830,6 +849,47 @@ static NetClientInfo net_virtio_info = {
.link_status_changed = virtio_net_set_link_status,
};
+static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
+{
+ VirtIONet *n = to_virtio_net(vdev);
+ if (!n->nic->nc.peer) {
+ return;
+ }
+ if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
+ return;
+ }
+
+ if (!tap_get_vhost_net(n->nic->nc.peer)) {
+ return;
+ }
+ if (!!n->vhost_started == !!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ return;
+ }
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+ int r = vhost_net_start(tap_get_vhost_net(n->nic->nc.peer), vdev);
+ if (r < 0) {
+ fprintf(stderr, "unable to start vhost net: %d: "
+ "falling back on userspace virtio\n", -r);
+ } else {
+ n->vhost_started = 1;
+ }
+ } else {
+ vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
+ n->vhost_started = 0;
+ }
+}
+
+static void virtio_net_vmstate_change(void *opaque, int running, int reason)
+{
+ VirtIONet *n = opaque;
+ if (!running) {
+ return;
+ }
+ /* This is called when the VM starts; it will start the vhost
+ * backend if appropriate, e.g. after migration. */
+ virtio_net_set_status(&n->vdev, n->vdev.status);
+}
+
VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
{
VirtIONet *n;
@@ -845,6 +905,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
n->vdev.set_features = virtio_net_set_features;
n->vdev.bad_features = virtio_net_bad_features;
n->vdev.reset = virtio_net_reset;
+ n->vdev.set_status = virtio_net_set_status;
n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx);
n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
@@ -867,6 +928,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
register_savevm("virtio-net", virtio_net_id++, VIRTIO_NET_VM_VERSION,
virtio_net_save, virtio_net_load, n);
+ n->vmstate = qemu_add_vm_change_state_handler(virtio_net_vmstate_change, n);
return &n->vdev;
}
@@ -874,6 +936,11 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
void virtio_net_exit(VirtIODevice *vdev)
{
VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev);
+ qemu_del_vm_change_state_handler(n->vmstate);
+
+ if (n->vhost_started) {
+ vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
+ }
qemu_purge_queued_packets(&n->nic->nc);
--
1.7.0.18.g0d53a5