From: Alex Williamson <alex.williamson@hp.com>
To: kvm <kvm@vger.kernel.org>
Cc: Mark McLoughlin <markmc@redhat.com>, qemu-devel <qemu-devel@nongnu.org>
Subject: [Qemu-devel] [PATCH 5/5] virtio-net: Add additional MACs via a filter table
Date: Tue, 13 Jan 2009 14:24:02 -0700
Message-ID: <1231881842.9095.196.camel@bling>
Signed-off-by: Alex Williamson <alex.williamson@hp.com>
---
qemu/hw/virtio-net.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++++-
qemu/hw/virtio-net.h | 4 +++
2 files changed, 77 insertions(+), 1 deletions(-)
diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c
index 99e582f..69b7511 100644
--- a/qemu/hw/virtio-net.c
+++ b/qemu/hw/virtio-net.c
@@ -21,7 +21,7 @@
#define TAP_VNET_HDR
-#define VIRTIO_VM_VERSION 3
+#define VIRTIO_VM_VERSION 4
typedef struct VirtIONet
{
@@ -36,6 +36,11 @@ typedef struct VirtIONet
int mergeable_rx_bufs;
int promisc;
int allmulti;
+ struct {
+ int entries;
+ int in_use;
+ uint8_t *macs;
+ } mac_table;
} VirtIONet;
/* TODO
@@ -69,6 +74,17 @@ static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
}
}
+static void virtio_net_reset(VirtIODevice *vdev)
+{
+ VirtIONet *n = to_virtio_net(vdev);
+
+ /* Allow the MAC filter table to be re-allocated after a device reset */
+ n->mac_table.in_use = 0;
+ n->mac_table.entries = 0;
+ qemu_free(n->mac_table.macs);
+ n->mac_table.macs = NULL;
+}
+
static uint32_t virtio_net_get_features(VirtIODevice *vdev)
{
uint32_t features = (1 << VIRTIO_NET_F_MAC);
@@ -157,10 +173,46 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
n->allmulti = *on;
else
*status = VIRTIO_NET_ERR;
+
+ } else if (ctrl->class == VIRTIO_NET_CTRL_MAC_TABLE) {
+ if (ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_ALLOC) {
+ uint32_t *entries;
+
+ if (n->mac_table.entries || elem.out_num != 2 ||
+ elem.out_sg[1].iov_len != sizeof(*entries)) {
+ *status = VIRTIO_NET_ERR;
+ goto reply;
+ }
+
+ entries = (void *)elem.out_sg[1].iov_base;
+ n->mac_table.macs = qemu_mallocz(*entries * 6);
+ if (!n->mac_table.macs) {
+ *status = VIRTIO_NET_ERR;
+ goto reply;
+ }
+
+ n->mac_table.entries = *entries;
+ *status = VIRTIO_NET_OK;
+ } else if (ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET) {
+ if (!n->mac_table.entries || (elem.out_num == 2 &&
+ (elem.out_sg[1].iov_len / 6) > n->mac_table.entries)) {
+ *status = VIRTIO_NET_ERR;
+ goto reply;
+ }
+
+ if (elem.out_num == 2) {
+ memcpy(n->mac_table.macs, elem.out_sg[1].iov_base,
+ elem.out_sg[1].iov_len);
+ n->mac_table.in_use = elem.out_sg[1].iov_len / 6;
+ } else {
+ n->mac_table.in_use = 0;
+ }
+ }
} else {
*status = VIRTIO_NET_ERR;
}
+reply:
virtqueue_push(vq, &elem, sizeof(*status));
virtio_notify(vdev, vq);
}
@@ -276,6 +328,7 @@ static int receive_header(VirtIONet *n, struct iovec *iov, int iovcnt,
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
static uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ int i;
if (n->promisc)
return 1;
@@ -289,6 +342,11 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
if (!memcmp(buf, n->mac, 6))
return 1;
+ for (i = 0; i < n->mac_table.in_use; i++) {
+ if (!memcmp(buf, &n->mac_table.macs[i * 6], 6))
+ return 1;
+ }
+
return 0;
}
@@ -458,6 +516,10 @@ static void virtio_net_save(QEMUFile *f, void *opaque)
#endif
qemu_put_be32(f, n->promisc);
qemu_put_be32(f, n->allmulti);
+ qemu_put_be32(f, n->mac_table.entries);
+ qemu_put_be32(f, n->mac_table.in_use);
+ if (n->mac_table.entries)
+ qemu_put_buffer(f, n->mac_table.macs, n->mac_table.entries * 6);
}
static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
@@ -485,6 +547,15 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
n->promisc = 1;
}
+ if (version_id >= 4) {
+ n->mac_table.entries = qemu_get_be32(f);
+ n->mac_table.in_use = qemu_get_be32(f);
+ if (n->mac_table.entries) {
+ n->mac_table.macs = qemu_mallocz(n->mac_table.entries * 6);
+ qemu_get_buffer(f, n->mac_table.macs, n->mac_table.entries * 6);
+ }
+ }
+
if (n->tx_timer_active) {
qemu_mod_timer(n->tx_timer,
qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
@@ -509,6 +580,7 @@ PCIDevice *virtio_net_init(PCIBus *bus, NICInfo *nd, int devfn)
n->vdev.set_config = virtio_net_set_config;
n->vdev.get_features = virtio_net_get_features;
n->vdev.set_features = virtio_net_set_features;
+ n->vdev.reset = virtio_net_reset;
n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx);
n->ctrl_vq = virtio_add_queue(&n->vdev, 16, virtio_net_handle_ctrl);
diff --git a/qemu/hw/virtio-net.h b/qemu/hw/virtio-net.h
index beae4a4..4111036 100644
--- a/qemu/hw/virtio-net.h
+++ b/qemu/hw/virtio-net.h
@@ -84,4 +84,8 @@ PCIDevice *virtio_net_init(PCIBus *bus, NICInfo *nd, int devfn);
#define VIRTIO_NET_CTRL_RX_MODE_PROMISC 0
#define VIRTIO_NET_CTRL_RX_MODE_ALLMULTI 1
+#define VIRTIO_NET_CTRL_MAC_TABLE 1
+ #define VIRTIO_NET_CTRL_MAC_TABLE_ALLOC 0
+ #define VIRTIO_NET_CTRL_MAC_TABLE_SET 1
+
#endif
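
For reference, this is how a guest would lay out the two new control commands
exactly as the handler above parses them.  It is only an illustrative sketch,
not code from this patch: the packing helpers and the flat-buffer layout are
assumptions for demonstration, and a real driver would place the header, the
payload and the writable status byte in separate descriptors on the control
virtqueue rather than one contiguous buffer.

#include <stdint.h>
#include <string.h>

/* Values from the virtio-net.h hunk above */
#define VIRTIO_NET_CTRL_MAC_TABLE        1
#define VIRTIO_NET_CTRL_MAC_TABLE_ALLOC  0
#define VIRTIO_NET_CTRL_MAC_TABLE_SET    1

/* ALLOC: 2-byte {class, cmd} header followed by a u32 entry count.
 * The device then reserves that many six-byte filter slots. */
static size_t pack_mac_table_alloc(uint8_t *buf, uint32_t entries)
{
    buf[0] = VIRTIO_NET_CTRL_MAC_TABLE;
    buf[1] = VIRTIO_NET_CTRL_MAC_TABLE_ALLOC;
    memcpy(buf + 2, &entries, sizeof(entries));
    return 2 + sizeof(entries);
}

/* SET: same header followed by 'count' MAC addresses packed back to back,
 * 6 bytes each.  Sending the header with no payload clears the table
 * (in_use goes back to 0). */
static size_t pack_mac_table_set(uint8_t *buf, const uint8_t (*macs)[6],
                                 uint32_t count)
{
    buf[0] = VIRTIO_NET_CTRL_MAC_TABLE;
    buf[1] = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    memcpy(buf + 2, macs, (size_t)count * 6);
    return 2 + (size_t)count * 6;
}

The device answers with the usual one-byte status in the writable part of the
request: a second ALLOC while a table is already allocated, or a SET carrying
more addresses than were allocated, is rejected with VIRTIO_NET_ERR.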