* [Qemu-devel] [PATCH v2 1/5] virtio-net: use the backend cross-endian capabilities
From: Greg Kurz @ 2016-01-11 16:02 UTC
To: Michael S. Tsirkin; +Cc: qemu-devel
When running a fully emulated device in cross-endian conditions, including
a virtio 1.0 device offered to a big-endian guest, we need to fix the vnet
headers. This is currently handled by the virtio_net_hdr_swap() function
in the core virtio-net code, but it should actually be handled by the net
backend.
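For reference, the fixing amounts to byte-swapping the multi-byte fields of
struct virtio_net_hdr. Here is a minimal standalone sketch of the idea; it is
not the actual virtio_net_hdr_swap() code, which goes through the
virtio_tswap16() helpers so the swap only happens when device and host
endianness actually differ, and it assumes a Linux/glibc host for the headers:

#include <byteswap.h>
#include <linux/virtio_net.h>

/* Sketch: unconditionally byte-swap the 16-bit fields of a vnet header.
 * flags and gso_type are single bytes and need no swapping. */
static void vnet_hdr_bswap(struct virtio_net_hdr *hdr)
{
    hdr->hdr_len     = bswap_16(hdr->hdr_len);
    hdr->gso_size    = bswap_16(hdr->gso_size);
    hdr->csum_start  = bswap_16(hdr->csum_start);
    hdr->csum_offset = bswap_16(hdr->csum_offset);
}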
With this patch, virtio-net now tries to configure the backend to do the
endian fixing when the device starts (i.e. the driver sets the DRIVER_OK
status bit). If the backend cannot support the requested endianness, we
fall back on virtio_net_hdr_swap(): this is recorded in the
needs_vnet_hdr_swap flag, to be used in the TX and RX paths.
Note that we reset the backend to the default behaviour (guest-native
endianness) when the device stops (i.e. the device status had the DRIVER_OK
bit set and the driver clears it). This is needed, at least with the Linux
tap backend: otherwise the guest may lose network connectivity if it is
rebooted into a different endianness.
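For illustration, this is roughly what the Linux tap backend does underneath
when asked to fix the endianness: the request ends up as an ioctl on the tap
file descriptor. This is a hedged sketch, not the actual net/tap-linux.c code,
and the helper names are made up; the point is that TUNSETVNETBE can be
refused by the kernel, which is exactly why the needs_vnet_hdr_swap fallback
above has to stay:

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/if_tun.h>

/* Ask the kernel to parse/produce little-endian vnet headers on this tap fd,
 * regardless of host endianness. */
static int tap_vnet_set_le(int tap_fd, int enable)
{
    return ioctl(tap_fd, TUNSETVNETLE, &enable) ? -errno : 0;
}

/* Big-endian vnet headers only work on kernels built with cross-endian
 * support; otherwise the ioctl fails and virtio-net keeps swapping the
 * headers itself. */
static int tap_vnet_set_be(int tap_fd, int enable)
{
#ifdef TUNSETVNETBE
    return ioctl(tap_fd, TUNSETVNETBE, &enable) ? -errno : 0;
#else
    return -EINVAL;
#endif
}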
The current vhost-net code also tries to configure the net backends. This
is no longer needed and will be reverted in a subsequent patch.
v2:
- dropped useless check in the 'else if' branch in virtio_net_vnet_status()
- merged virtio_net_vhost_status() change from patch 2
- use semicolon in "backend does not support..." error message
- merged patch 3 (drop the virtio_needs_swap() helper)
- provided some more details in changelog and comments
Signed-off-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
---
hw/net/virtio-net.c | 49 +++++++++++++++++++++++++++++++++++--
include/hw/virtio/virtio-access.h | 9 -------
include/hw/virtio/virtio-net.h | 1 +
3 files changed, 48 insertions(+), 11 deletions(-)
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index a877614e3e7a..497fb7119a08 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -128,6 +128,13 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
if (!n->vhost_started) {
int r, i;
+ if (n->needs_vnet_hdr_swap) {
+ error_report("backend does not support %s vnet headers; "
+ "falling back on userspace virtio",
+ virtio_is_big_endian(vdev) ? "BE" : "LE");
+ return;
+ }
+
/* Any packets outstanding? Purge them to avoid touching rings
* when vhost is running.
*/
@@ -152,6 +159,40 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
}
}
+static void virtio_net_vnet_status(VirtIONet *n, uint8_t status)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ NetClientState *peer = qemu_get_queue(n->nic)->peer;
+
+ if (virtio_net_started(n, status)) {
+ int r;
+
+ /* Before using the device, we tell the network backend about the
+ * endianness to use when parsing vnet headers. If the backend can't
+ * do it, we fallback onto fixing the headers in the core virtio-net
+ * code.
+ */
+ if (virtio_is_big_endian(vdev)) {
+ r = qemu_set_vnet_be(peer, true);
+ } else {
+ r = qemu_set_vnet_le(peer, true);
+ }
+
+ n->needs_vnet_hdr_swap = !!r;
+ } else if (virtio_net_started(n, vdev->status)) {
+ /* After using the device, we need to reset the network backend to
+ * the default (guest native endianness), otherwise the guest may
+ * loose network connectivity if it is rebooted into a different
+ * endianness.
+ */
+ if (virtio_is_big_endian(vdev)) {
+ qemu_set_vnet_be(peer, false);
+ } else {
+ qemu_set_vnet_le(peer, false);
+ }
+ }
+}
+
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
VirtIONet *n = VIRTIO_NET(vdev);
@@ -159,6 +200,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
int i;
uint8_t queue_status;
+ virtio_net_vnet_status(n, status);
virtio_net_vhost_status(n, status);
for (i = 0; i < n->max_queues; i++) {
@@ -957,7 +999,10 @@ static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
void *wbuf = (void *)buf;
work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
size - n->host_hdr_len);
- virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
+
+ if (n->needs_vnet_hdr_swap) {
+ virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
+ }
iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
} else {
struct virtio_net_hdr hdr = {
@@ -1167,7 +1212,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
error_report("virtio-net header incorrect");
exit(1);
}
- if (virtio_needs_swap(vdev)) {
+ if (n->needs_vnet_hdr_swap) {
virtio_net_hdr_swap(vdev, (void *) &mhdr);
sg2[0].iov_base = &mhdr;
sg2[0].iov_len = n->guest_hdr_len;
diff --git a/include/hw/virtio/virtio-access.h b/include/hw/virtio/virtio-access.h
index 8aec843c8ff3..a01fff2e51d7 100644
--- a/include/hw/virtio/virtio-access.h
+++ b/include/hw/virtio/virtio-access.h
@@ -143,15 +143,6 @@ static inline uint64_t virtio_ldq_p(VirtIODevice *vdev, const void *ptr)
}
}
-static inline bool virtio_needs_swap(VirtIODevice *vdev)
-{
-#ifdef HOST_WORDS_BIGENDIAN
- return virtio_access_is_big_endian(vdev) ? false : true;
-#else
- return virtio_access_is_big_endian(vdev) ? true : false;
-#endif
-}
-
static inline uint16_t virtio_tswap16(VirtIODevice *vdev, uint16_t s)
{
#ifdef HOST_WORDS_BIGENDIAN
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index f3cc25feca2b..27bc868fbc7d 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -94,6 +94,7 @@ typedef struct VirtIONet {
uint64_t curr_guest_offloads;
QEMUTimer *announce_timer;
int announce_counter;
+ bool needs_vnet_hdr_swap;
} VirtIONet;
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
* [Qemu-devel] [PATCH v2 2/5] Revert "vhost-net: tell tap backend about the vnet endianness"
From: Greg Kurz @ 2016-01-11 16:02 UTC
To: Michael S. Tsirkin; +Cc: qemu-devel
This reverts commit 5be7d9f1b1452613b95c6ba70b8d7ad3d0797991.
Cross-endian configuration of the net backend is now handled by the core
virtio-net code.
v2:
- moved changes not belonging to the revert to patch 1
- updated changelog accordingly
Signed-off-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
---
hw/net/vhost_net.c | 33 +--------------------------------
1 file changed, 1 insertion(+), 32 deletions(-)
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 318c3e6ad213..0c7362b7a772 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -38,7 +38,6 @@
#include "standard-headers/linux/virtio_ring.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio-bus.h"
-#include "hw/virtio/virtio-access.h"
struct vhost_net {
struct vhost_dev dev;
@@ -199,27 +198,6 @@ static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index)
net->dev.vq_index = vq_index;
}
-static int vhost_net_set_vnet_endian(VirtIODevice *dev, NetClientState *peer,
- bool set)
-{
- int r = 0;
-
- if (virtio_vdev_has_feature(dev, VIRTIO_F_VERSION_1) ||
- (virtio_legacy_is_cross_endian(dev) && !virtio_is_big_endian(dev))) {
- r = qemu_set_vnet_le(peer, set);
- if (r) {
- error_report("backend does not support LE vnet headers");
- }
- } else if (virtio_legacy_is_cross_endian(dev)) {
- r = qemu_set_vnet_be(peer, set);
- if (r) {
- error_report("backend does not support BE vnet headers");
- }
- }
-
- return r;
-}
-
static int vhost_net_start_one(struct vhost_net *net,
VirtIODevice *dev)
{
@@ -308,11 +286,6 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
goto err;
}
- r = vhost_net_set_vnet_endian(dev, ncs[0].peer, true);
- if (r < 0) {
- goto err;
- }
-
for (i = 0; i < total_queues; i++) {
vhost_net_set_vq_index(get_vhost_net(ncs[i].peer), i * 2);
}
@@ -320,7 +293,7 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
if (r < 0) {
error_report("Error binding guest notifier: %d", -r);
- goto err_endian;
+ goto err;
}
for (i = 0; i < total_queues; i++) {
@@ -342,8 +315,6 @@ err_start:
fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
fflush(stderr);
}
-err_endian:
- vhost_net_set_vnet_endian(dev, ncs[0].peer, false);
err:
return r;
}
@@ -366,8 +337,6 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
fflush(stderr);
}
assert(r >= 0);
-
- assert(vhost_net_set_vnet_endian(dev, ncs[0].peer, false) >= 0);
}
void vhost_net_cleanup(struct vhost_net *net)