From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org, Anthony Liguori <anthony@codemonkey.ws>,
kraxel@redhat.com
Subject: [Qemu-devel] [PATCH RFC] virtio: add features qdev property
Date: Sun, 13 Dec 2009 22:43:41 +0200
Message-ID: <20091213204341.GA25823@redhat.com>
Add a "features" qdev property to virtio devices. This makes
it possible, for example, to define a machine without indirect
buffer support (required for 0.10 compatibility), or without
hardware checksum support (required for 0.11 compatibility).
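For instance, the exposed feature set could then be restricted
from the command line like this (the mask value is illustrative
only; bits offered by neither the transport nor the device are
rejected at init time):

    qemu -device virtio-net-pci,features=0x0 ...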
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
Here's what I came up with to solve the problem of virtio
feature differences between 0.11 and 0.12 (applies on top of
the guest_features patch).

Comments? Gerd, what do you think?
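The compat machine types could then pin the property through the
existing compat_props mechanism. A rough sketch, with hypothetical
mask values (the real masks would have to mirror the feature bits
0.10/0.11 actually exposed):

    static GlobalProperty compat_0_10_props[] = {
        {
            .driver   = "virtio-blk-pci",
            .property = "features",
            /* hypothetical mask, not the real 0.10 feature set */
            .value    = "0x0",
        },
        { /* end of list */ }
    };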
 hw/virtio-pci.c |   29 +++++++++++++++++++++++++++--
 hw/virtio.h     |    1 +
 2 files changed, 28 insertions(+), 2 deletions(-)
diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
index 80bc645..43b02b6 100644
--- a/hw/virtio-pci.c
+++ b/hw/virtio-pci.c
@@ -90,6 +90,7 @@ typedef struct {
     uint32_t addr;
     uint32_t class_code;
     uint32_t nvectors;
+    uint32_t host_features;
     DriveInfo *dinfo;
     NICConf nic;
 } VirtIOPCIProxy;
@@ -235,8 +236,7 @@ static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
 
     switch (addr) {
     case VIRTIO_PCI_HOST_FEATURES:
-        ret = vdev->get_features(vdev);
-        ret |= vdev->binding->get_features(proxy);
+        ret = vdev->host_features;
         break;
     case VIRTIO_PCI_GUEST_FEATURES:
         ret = vdev->guest_features;
@@ -398,6 +398,8 @@ static const VirtIOBindings virtio_pci_bindings = {
     .get_features = virtio_pci_get_features,
 };
 
+#define VIRTIO_PCI_NO_FEATURES 0xffffffff
+
 static void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev,
                             uint16_t vendor, uint16_t device,
                             uint16_t class_code, uint8_t pif)
@@ -442,6 +444,18 @@ static void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev,
                      virtio_map);
 
     virtio_bind_device(vdev, &virtio_pci_bindings, proxy);
+    if (proxy->host_features == VIRTIO_PCI_NO_FEATURES)
+        proxy->host_features = vdev->get_features(vdev) |
+            vdev->binding->get_features(proxy);
+    else if (proxy->host_features & ~vdev->get_features(vdev) &
+             ~vdev->binding->get_features(proxy)) {
+        fprintf(stderr, "Requested host features 0x%x, "
+                "but supported features are transport:0x%x device:0x%x\n",
+                proxy->host_features,
+                vdev->binding->get_features(proxy),
+                vdev->get_features(vdev));
+        exit(1);
+    }
 }
 
 static int virtio_blk_init_pci(PCIDevice *pci_dev)
@@ -561,6 +575,8 @@ static PCIDeviceInfo virtio_info[] = {
             DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
             DEFINE_PROP_DRIVE("drive", VirtIOPCIProxy, dinfo),
             DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
+            DEFINE_PROP_HEX32("features", VirtIOPCIProxy, host_features,
+                              VIRTIO_PCI_NO_FEATURES),
             DEFINE_PROP_END_OF_LIST(),
         },
         .qdev.reset = virtio_pci_reset,
@@ -571,6 +587,8 @@ static PCIDeviceInfo virtio_info[] = {
         .exit = virtio_net_exit_pci,
         .qdev.props = (Property[]) {
             DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
+            DEFINE_PROP_HEX32("features", VirtIOPCIProxy, host_features,
+                              VIRTIO_PCI_NO_FEATURES),
             DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
             DEFINE_PROP_END_OF_LIST(),
         },
@@ -582,6 +600,8 @@ static PCIDeviceInfo virtio_info[] = {
         .exit = virtio_exit_pci,
         .qdev.props = (Property[]) {
             DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
+            DEFINE_PROP_HEX32("features", VirtIOPCIProxy, host_features,
+                              VIRTIO_PCI_NO_FEATURES),
             DEFINE_PROP_END_OF_LIST(),
         },
         .qdev.reset = virtio_pci_reset,
@@ -590,6 +610,11 @@ static PCIDeviceInfo virtio_info[] = {
         .qdev.size = sizeof(VirtIOPCIProxy),
         .init = virtio_balloon_init_pci,
         .exit = virtio_exit_pci,
+        .qdev.props = (Property[]) {
+            DEFINE_PROP_HEX32("features", VirtIOPCIProxy, host_features,
+                              VIRTIO_PCI_NO_FEATURES),
+            DEFINE_PROP_END_OF_LIST(),
+        },
         .qdev.reset = virtio_pci_reset,
     },{
         /* end of list */
diff --git a/hw/virtio.h b/hw/virtio.h
index 85ef171..73f784f 100644
--- a/hw/virtio.h
+++ b/hw/virtio.h
@@ -101,6 +101,7 @@ struct VirtIODevice
     uint8_t isr;
     uint16_t queue_sel;
     uint32_t guest_features;
+    uint32_t host_features;
     size_t config_len;
     void *config;
     uint16_t config_vector;
--
1.6.6.rc1.43.gf55cc
Thread overview: 22+ messages
2009-12-13 20:43 Michael S. Tsirkin [this message]
2009-12-14 9:41 ` [Qemu-devel] Re: [PATCH RFC] virtio: add features qdev property Gerd Hoffmann
2009-12-14 9:42 ` Michael S. Tsirkin
2009-12-14 10:24 ` Gerd Hoffmann
2009-12-14 11:10 ` Michael S. Tsirkin
2009-12-14 11:37 ` Gerd Hoffmann
2009-12-14 13:15 ` Michael S. Tsirkin
2009-12-14 13:30 ` Markus Armbruster
2009-12-14 13:59 ` Michael S. Tsirkin
2009-12-14 15:01 ` Gerd Hoffmann
2009-12-14 16:23 ` Michael S. Tsirkin
2009-12-14 17:18 ` Gerd Hoffmann
2009-12-14 19:17 ` Michael S. Tsirkin
2009-12-14 20:40 ` Gerd Hoffmann
2009-12-14 20:43 ` Michael S. Tsirkin
2009-12-14 21:12 ` Gerd Hoffmann
2009-12-14 21:14 ` Michael S. Tsirkin
2009-12-14 21:24 ` Gerd Hoffmann
2009-12-14 14:56 ` Gerd Hoffmann
2009-12-14 19:20 ` Michael S. Tsirkin
2009-12-14 20:42 ` Gerd Hoffmann
2009-12-14 11:50 ` Alexander Graf