kvm.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/5] kvm: qemu: Move virtqueue_next_desc() around
@ 2008-10-08 19:35 Mark McLoughlin
  2008-10-08 19:35 ` [PATCH 2/5] kvm: qemu: Introduce virtqueue_fill() and virtqueue_flush() Mark McLoughlin
  0 siblings, 1 reply; 16+ messages in thread
From: Mark McLoughlin @ 2008-10-08 19:35 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Rusty Russell, Mark McLoughlin

virtqueue_next_desc() is only used in virtqueue_pop(), so move
it alongside that.

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
---
 qemu/hw/virtio.c |   30 +++++++++++++++---------------
 1 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/qemu/hw/virtio.c b/qemu/hw/virtio.c
index e675f43..b3ee649 100644
--- a/qemu/hw/virtio.c
+++ b/qemu/hw/virtio.c
@@ -107,6 +107,21 @@ static void virtqueue_init(VirtQueue *vq, void *p)
     vq->vring.used = (void *)TARGET_PAGE_ALIGN((unsigned long)&vq->vring.avail->ring[vq->vring.num]);
 }
 
+void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
+		    unsigned int len)
+{
+    VRingUsedElem *used;
+
+    /* Get a pointer to the next entry in the used ring. */
+    used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
+    used->id = elem->index;
+    used->len = len;
+    /* Make sure buffer is written before we update index. */
+    wmb();
+    vq->vring.used->idx++;
+    vq->inuse--;
+}
+
 static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
 {
     unsigned int next;
@@ -126,21 +141,6 @@ static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
     return next;
 }
 
-void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
-		    unsigned int len)
-{
-    VRingUsedElem *used;
-
-    /* Get a pointer to the next entry in the used ring. */
-    used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
-    used->id = elem->index;
-    used->len = len;
-    /* Make sure buffer is written before we update index. */
-    wmb();
-    vq->vring.used->idx++;
-    vq->inuse--;
-}
-
 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
 {
     unsigned int i, head;
-- 
1.5.4.3


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 2/5] kvm: qemu: Introduce virtqueue_fill() and virtqueue_flush()
  2008-10-08 19:35 [PATCH 1/5] kvm: qemu: Move virtqueue_next_desc() around Mark McLoughlin
@ 2008-10-08 19:35 ` Mark McLoughlin
  2008-10-08 19:35   ` [PATCH 3/5] kvm: qemu: Simplify virtio_net_can_receive() a little Mark McLoughlin
  0 siblings, 1 reply; 16+ messages in thread
From: Mark McLoughlin @ 2008-10-08 19:35 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Rusty Russell, Mark McLoughlin

Split virtqueue_push() into two logical steps - adding an element
to the used ring and notifying the other side of added elements.

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
---
 qemu/hw/virtio.c |   23 ++++++++++++++++++-----
 qemu/hw/virtio.h |    3 +++
 2 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/qemu/hw/virtio.c b/qemu/hw/virtio.c
index b3ee649..74b898f 100644
--- a/qemu/hw/virtio.c
+++ b/qemu/hw/virtio.c
@@ -107,19 +107,32 @@ static void virtqueue_init(VirtQueue *vq, void *p)
     vq->vring.used = (void *)TARGET_PAGE_ALIGN((unsigned long)&vq->vring.avail->ring[vq->vring.num]);
 }
 
-void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
-		    unsigned int len)
+void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+		    unsigned int len, unsigned int idx)
 {
     VRingUsedElem *used;
 
+    idx += vq->vring.used->idx;
+
     /* Get a pointer to the next entry in the used ring. */
-    used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
+    used = &vq->vring.used->ring[idx % vq->vring.num];
     used->id = elem->index;
     used->len = len;
+}
+
+void virtqueue_flush(VirtQueue *vq, unsigned int count)
+{
     /* Make sure buffer is written before we update index. */
     wmb();
-    vq->vring.used->idx++;
-    vq->inuse--;
+    vq->vring.used->idx += count;
+    vq->inuse -= count;
+}
+
+void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
+		    unsigned int len)
+{
+    virtqueue_fill(vq, elem, len, 0);
+    virtqueue_flush(vq, 1);
 }
 
 static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
diff --git a/qemu/hw/virtio.h b/qemu/hw/virtio.h
index 0dcedbf..87a15cc 100644
--- a/qemu/hw/virtio.h
+++ b/qemu/hw/virtio.h
@@ -139,6 +139,9 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
 
 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
 		    unsigned int len);
+void virtqueue_flush(VirtQueue *vq, unsigned int count);
+void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+		    unsigned int len, unsigned int idx);
 
 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
 
-- 
1.5.4.3


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 3/5] kvm: qemu: Simplify virtio_net_can_receive() a little
  2008-10-08 19:35 ` [PATCH 2/5] kvm: qemu: Introduce virtqueue_fill() and virtqueue_flush() Mark McLoughlin
@ 2008-10-08 19:35   ` Mark McLoughlin
  2008-10-08 19:35     ` [PATCH 4/5] kvm: qemu: Split iov_fill() out from virtio_net_receive() Mark McLoughlin
  0 siblings, 1 reply; 16+ messages in thread
From: Mark McLoughlin @ 2008-10-08 19:35 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Rusty Russell, Mark McLoughlin

In order to de-obfuscate the final patch in this series

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
---
 qemu/hw/virtio-net.c |    9 +++++----
 1 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c
index bc2ede6..4b4c48b 100644
--- a/qemu/hw/virtio-net.c
+++ b/qemu/hw/virtio-net.c
@@ -140,17 +140,18 @@ static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
 static int virtio_net_can_receive(void *opaque)
 {
     VirtIONet *n = opaque;
+    VirtQueue *vq = n->rx_vq;
 
-    if (n->rx_vq->vring.avail == NULL ||
+    if (vq->vring.avail == NULL ||
 	!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
 	return 0;
 
-    if (n->rx_vq->vring.avail->idx == n->rx_vq->last_avail_idx) {
-	n->rx_vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
+    if (vq->vring.avail->idx == vq->last_avail_idx) {
+	vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
 	return 0;
     }
 
-    n->rx_vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
+    vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
     return 1;
 }
 
-- 
1.5.4.3


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 4/5] kvm: qemu: Split iov_fill() out from virtio_net_receive()
  2008-10-08 19:35   ` [PATCH 3/5] kvm: qemu: Simplify virtio_net_can_receive() a little Mark McLoughlin
@ 2008-10-08 19:35     ` Mark McLoughlin
  2008-10-08 19:35       ` [PATCH 5/5] kvm: qemu: Improve virtio_net recv buffer allocation scheme Mark McLoughlin
  0 siblings, 1 reply; 16+ messages in thread
From: Mark McLoughlin @ 2008-10-08 19:35 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Rusty Russell, Mark McLoughlin

Simplifies the current code, but more especially, the code
in the next patch.

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
---
 qemu/hw/virtio-net.c |   32 +++++++++++++++++++++-----------
 1 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c
index 4b4c48b..403247b 100644
--- a/qemu/hw/virtio-net.c
+++ b/qemu/hw/virtio-net.c
@@ -183,12 +183,27 @@ static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
     }
 }
 
+static int iov_fill(struct iovec *iov, int iovcnt, const void *buf, int count)
+{
+    int offset, i;
+
+    offset = i = 0;
+    while (offset < count && i < iovcnt) {
+	int len = MIN(iov[i].iov_len, count - offset);
+	memcpy(iov[i].iov_base, buf + offset, len);
+	offset += len;
+	i++;
+    }
+
+    return offset;
+}
+
 static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
 {
     VirtIONet *n = opaque;
     VirtQueueElement elem;
     struct virtio_net_hdr *hdr;
-    int offset, i;
+    int offset;
     int total;
 
     if (virtqueue_pop(n->rx_vq, &elem) == 0)
@@ -204,23 +219,18 @@ static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
     hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
 
     offset = 0;
-    total = sizeof(*hdr);
+    total = size + sizeof(*hdr);
 
     if (tap_has_vnet_hdr(n->vc->vlan->first_client)) {
 	memcpy(hdr, buf, sizeof(*hdr));
-	offset += total;
+	offset += sizeof(*hdr);
+	total -= offset;
         work_around_broken_dhclient(hdr, buf + offset, size - offset);
     }
 
     /* copy in packet.  ugh */
-    i = 1;
-    while (offset < size && i < elem.in_num) {
-	int len = MIN(elem.in_sg[i].iov_len, size - offset);
-	memcpy(elem.in_sg[i].iov_base, buf + offset, len);
-	offset += len;
-	total += len;
-	i++;
-    }
+    iov_fill(&elem.in_sg[1], elem.in_num - 1,
+	     buf + offset, size - offset);
 
     /* signal other side */
     virtqueue_push(n->rx_vq, &elem, total);
-- 
1.5.4.3


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 5/5] kvm: qemu: Improve virtio_net recv buffer allocation scheme
  2008-10-08 19:35     ` [PATCH 4/5] kvm: qemu: Split iov_fill() out from virtio_net_receive() Mark McLoughlin
@ 2008-10-08 19:35       ` Mark McLoughlin
  2008-10-12 10:00         ` Avi Kivity
  0 siblings, 1 reply; 16+ messages in thread
From: Mark McLoughlin @ 2008-10-08 19:35 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Rusty Russell, Herbert Xu, Mark McLoughlin

From: Herbert Xu <herbert.xu@redhat.com>

Currently, in order to receive large packets, the guest must allocate
max-sized packet buffers and pass them to the host. Each of these
max-sized packets occupy 20 ring entries, which means we can only
transfer a maximum of 12 packets in a single batch with a 256 entry
ring.

When receiving packets from external networks, we only receive MTU
sized packets and so the throughput observed is throttled by the
number of packets the ring can hold.

Implement the VIRTIO_NET_F_MRG_RXBUF feature to let guests know that
we can merge smaller buffers together in order to handle large packets.

This scheme allows us to be efficient in our use of ring entries
while still supporting large packets. Benchmarking using netperf from
an external machine to a guest over a 10Gb/s network shows a 100%
improvement from ~1Gb/s to ~2Gb/s. With a local host->guest benchmark
with GSO disabled on the host side, throughput was seen to increase
from 700Mb/s to 1.7Gb/s.

Based on a patch from Herbert, with the feature renamed from
"datahead" and some re-factoring for readability.

Signed-off-by: Herbert Xu <herbert.xu@redhat.com>
Signed-off-by: Mark McLoughlin <markmc@redhat.com>
---
 qemu/hw/virtio-net.c |   67 +++++++++++++++++++++++++++++++++++++++++++++----
 1 files changed, 61 insertions(+), 6 deletions(-)

diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c
index 403247b..afa5fe5 100644
--- a/qemu/hw/virtio-net.c
+++ b/qemu/hw/virtio-net.c
@@ -34,9 +34,13 @@
 #define VIRTIO_NET_F_HOST_TSO6	12	/* Host can handle TSOv6 in. */
 #define VIRTIO_NET_F_HOST_ECN	13	/* Host can handle TSO[6] w/ ECN in. */
 #define VIRTIO_NET_F_HOST_UFO	14	/* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF	15	/* Host can merge receive buffers. */
 
 #define TX_TIMER_INTERVAL 150000 /* 150 us */
 
+/* Should be the largest MAX_SKB_FRAGS supported. */
+#define VIRTIO_NET_MAX_FRAGS	18
+
 /* The config defining mac address (6 bytes) */
 struct virtio_net_config
 {
@@ -70,6 +74,7 @@ typedef struct VirtIONet
     VLANClientState *vc;
     QEMUTimer *tx_timer;
     int tx_timer_active;
+    int mergeable_rx_bufs;
 } VirtIONet;
 
 /* TODO
@@ -106,6 +111,7 @@ static uint32_t virtio_net_get_features(VirtIODevice *vdev)
 	features |= (1 << VIRTIO_NET_F_HOST_TSO4);
 	features |= (1 << VIRTIO_NET_F_HOST_TSO6);
 	features |= (1 << VIRTIO_NET_F_HOST_ECN);
+	features |= (1 << VIRTIO_NET_F_MRG_RXBUF);
 	/* Kernel can't actually handle UFO in software currently. */
     }
 
@@ -117,6 +123,8 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
     VirtIONet *n = to_virtio_net(vdev);
     VLANClientState *host = n->vc->vlan->first_client;
 
+    n->mergeable_rx_bufs = !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF));
+
     if (!tap_has_vnet_hdr(host) || !host->set_offload)
 	return;
 
@@ -141,12 +149,15 @@ static int virtio_net_can_receive(void *opaque)
 {
     VirtIONet *n = opaque;
     VirtQueue *vq = n->rx_vq;
+    int min_bufs;
 
     if (vq->vring.avail == NULL ||
 	!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
 	return 0;
 
-    if (vq->vring.avail->idx == vq->last_avail_idx) {
+    min_bufs = n->mergeable_rx_bufs ? VIRTIO_NET_MAX_FRAGS : 1;
+
+    if ((uint16_t)(vq->vring.avail->idx - vq->last_avail_idx) < min_bufs) {
 	vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
 	return 0;
     }
@@ -209,7 +220,12 @@ static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
     if (virtqueue_pop(n->rx_vq, &elem) == 0)
 	return;
 
-    if (elem.in_num < 1 || elem.in_sg[0].iov_len != sizeof(*hdr)) {
+    if (n->mergeable_rx_bufs) {
+	if (elem.in_num < 1 || elem.in_sg[0].iov_len < TARGET_PAGE_SIZE) {
+	    fprintf(stderr, "virtio-net IOV is irregular\n");
+	    exit(1);
+	}
+    } else if (elem.in_num < 1 || elem.in_sg[0].iov_len != sizeof(*hdr)) {
 	fprintf(stderr, "virtio-net header not in first element\n");
 	exit(1);
     }
@@ -229,11 +245,49 @@ static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
     }
 
     /* copy in packet.  ugh */
-    iov_fill(&elem.in_sg[1], elem.in_num - 1,
-	     buf + offset, size - offset);
 
-    /* signal other side */
-    virtqueue_push(n->rx_vq, &elem, total);
+    if (n->mergeable_rx_bufs) {
+	int i = 0;
+
+	elem.in_sg[0].iov_base += sizeof(*hdr);
+	elem.in_sg[0].iov_len  -= sizeof(*hdr);
+
+	offset += iov_fill(&elem.in_sg[0], elem.in_num,
+			   buf + offset, size - offset);
+
+	/* signal other side */
+	virtqueue_fill(n->rx_vq, &elem, total, i++);
+
+	while (offset < size) {
+	    int len;
+
+	    if (virtqueue_pop(n->rx_vq, &elem) == 0) {
+		fprintf(stderr, "virtio-net truncating packet\n");
+		exit(1);
+	    }
+
+	    if (elem.in_num < 1 || elem.in_sg[0].iov_len < TARGET_PAGE_SIZE) {
+		fprintf(stderr, "virtio-net IOV is irregular\n");
+		exit(1);
+	    }
+
+	    len = iov_fill(&elem.in_sg[0], elem.in_num,
+			   buf + offset, size - offset);
+
+	    virtqueue_fill(n->rx_vq, &elem, len, i++);
+
+	    offset += len;
+	}
+
+	virtqueue_flush(n->rx_vq, i);
+    } else {
+	iov_fill(&elem.in_sg[1], elem.in_num - 1,
+		 buf + offset, size - offset);
+
+	/* signal other side */
+	virtqueue_push(n->rx_vq, &elem, total);
+    }
+
     virtio_notify(&n->vdev, n->rx_vq);
 }
 
@@ -354,6 +408,7 @@ PCIDevice *virtio_net_init(PCIBus *bus, NICInfo *nd, int devfn)
 
     n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n);
     n->tx_timer_active = 0;
+    n->mergeable_rx_bufs = 0;
 
     register_savevm("virtio-net", virtio_net_id++, 1,
 		    virtio_net_save, virtio_net_load, n);
-- 
1.5.4.3


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 5/5] kvm: qemu: Improve virtio_net recv buffer allocation scheme
  2008-10-08 19:35       ` [PATCH 5/5] kvm: qemu: Improve virtio_net recv buffer allocation scheme Mark McLoughlin
@ 2008-10-12 10:00         ` Avi Kivity
  2008-10-14 13:44           ` Mark McLoughlin
  0 siblings, 1 reply; 16+ messages in thread
From: Avi Kivity @ 2008-10-12 10:00 UTC (permalink / raw)
  To: Mark McLoughlin; +Cc: kvm, Rusty Russell, Herbert Xu, Anthony Liguori

Mark McLoughlin wrote:
> From: Herbert Xu <herbert.xu@redhat.com>
>
> Currently, in order to receive large packets, the guest must allocate
> max-sized packet buffers and pass them to the host. Each of these
> max-sized packets occupy 20 ring entries, which means we can only
> transfer a maximum of 12 packets in a single batch with a 256 entry
> ring.
>
> When receiving packets from external networks, we only receive MTU
> sized packets and so the throughput observed is throttled by the
> number of packets the ring can hold.
>
> Implement the VIRTIO_NET_F_MRG_RXBUF feature to let guests know that
> we can merge smaller buffers together in order to handle large packets.
>
> This scheme allows us to be efficient in our use of ring entries
> while still supporting large packets. Benchmarking using netperf from
> an external machine to a guest over a 10Gb/s network shows a 100%
> improvement from ~1Gb/s to ~2Gb/s. With a local host->guest benchmark
> with GSO disabled on the host side, throughput was seen to increase
> from 700Mb/s to 1.7Gb/s.
>
> Based on a patch from Herbert, with the feature renamed from
> "datahead" and some re-factoring for readability.
>
>
> diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c
> index 403247b..afa5fe5 100644
> --- a/qemu/hw/virtio-net.c
> +++ b/qemu/hw/virtio-net.c
> @@ -34,9 +34,13 @@
>  #define VIRTIO_NET_F_HOST_TSO6	12	/* Host can handle TSOv6 in. */
>  #define VIRTIO_NET_F_HOST_ECN	13	/* Host can handle TSO[6] w/ ECN in. */
>  #define VIRTIO_NET_F_HOST_UFO	14	/* Host can handle UFO in. */
> +#define VIRTIO_NET_F_MRG_RXBUF	15	/* Host can merge receive buffers. */
>  
>   

What's the status of the guest side of this feature?

>  #define TX_TIMER_INTERVAL 150000 /* 150 us */
>  
> +/* Should be the largest MAX_SKB_FRAGS supported. */
> +#define VIRTIO_NET_MAX_FRAGS	18
> +
>   

This should be advertised by the host to the guest (or vice-versa?).  
We're embedding Linux-specific magic numbers in a guest-OS-agnostic ABI.

Preferably, there shouldn't be a limit at all.

> @@ -209,7 +220,12 @@ static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
>      if (virtqueue_pop(n->rx_vq, &elem) == 0)
>  	return;
>  
> -    if (elem.in_num < 1 || elem.in_sg[0].iov_len != sizeof(*hdr)) {
> +    if (n->mergeable_rx_bufs) {
> +	if (elem.in_num < 1 || elem.in_sg[0].iov_len < TARGET_PAGE_SIZE) {
> +	    fprintf(stderr, "virtio-net IOV is irregular\n");
> +	    exit(1);
> +	}
>   

Again, this is burying details of the current Linux stack into the ABI.  
The Linux stack may change not to be page oriented, or maybe this won't 
fit well with how Windows views things.  Can this be made not to depend on 
the size of the iov elements?

> +    } else if (elem.in_num < 1 || elem.in_sg[0].iov_len != sizeof(*hdr)) {
>  	fprintf(stderr, "virtio-net header not in first element\n");
>  	exit(1);
>      }
> @@ -229,11 +245,49 @@ static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
>      }
>  
>      /* copy in packet.  ugh */
> -    iov_fill(&elem.in_sg[1], elem.in_num - 1,
> -	     buf + offset, size - offset);
>  
> -    /* signal other side */
> -    virtqueue_push(n->rx_vq, &elem, total);
> +    if (n->mergeable_rx_bufs) {
> +	int i = 0;
> +
> +	elem.in_sg[0].iov_base += sizeof(*hdr);
> +	elem.in_sg[0].iov_len  -= sizeof(*hdr);
> +
> +	offset += iov_fill(&elem.in_sg[0], elem.in_num,
> +			   buf + offset, size - offset);
> +
> +	/* signal other side */
> +	virtqueue_fill(n->rx_vq, &elem, total, i++);
> +
> +	while (offset < size) {
> +	    int len;
> +
> +	    if (virtqueue_pop(n->rx_vq, &elem) == 0) {
> +		fprintf(stderr, "virtio-net truncating packet\n");
> +		exit(1);
> +	    }
> +
> +	    if (elem.in_num < 1 || elem.in_sg[0].iov_len < TARGET_PAGE_SIZE) {
> +		fprintf(stderr, "virtio-net IOV is irregular\n");
> +		exit(1);
> +	    }
> +
> +	    len = iov_fill(&elem.in_sg[0], elem.in_num,
> +			   buf + offset, size - offset);
> +
> +	    virtqueue_fill(n->rx_vq, &elem, len, i++);
> +
> +	    offset += len;
> +	}
> +
> +	virtqueue_flush(n->rx_vq, i);
> +    } else {
> +	iov_fill(&elem.in_sg[1], elem.in_num - 1,
> +		 buf + offset, size - offset);
> +
> +	/* signal other side */
> +	virtqueue_push(n->rx_vq, &elem, total);
> +    }
> +
>   

Can we merge the two sides of the if () so that the only difference is 
the number of times we go through the loop?

Anthony, please review this as well, my virtio-foo is pretty superficial.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 5/5] kvm: qemu: Improve virtio_net recv buffer allocation scheme
  2008-10-12 10:00         ` Avi Kivity
@ 2008-10-14 13:44           ` Mark McLoughlin
  2008-10-14 15:47             ` Avi Kivity
  2008-11-26 14:50             ` [PATCH 0/5] kvm: qemu: virtio_net: add support for mergeable rx buffers Mark McLoughlin
  0 siblings, 2 replies; 16+ messages in thread
From: Mark McLoughlin @ 2008-10-14 13:44 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Rusty Russell, Herbert Xu, Anthony Liguori

On Sun, 2008-10-12 at 12:00 +0200, Avi Kivity wrote:
> Mark McLoughlin wrote:
> > diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c
> > index 403247b..afa5fe5 100644
> > --- a/qemu/hw/virtio-net.c
> > +++ b/qemu/hw/virtio-net.c
> > @@ -34,9 +34,13 @@
> >  #define VIRTIO_NET_F_HOST_TSO6	12	/* Host can handle TSOv6 in. */
> >  #define VIRTIO_NET_F_HOST_ECN	13	/* Host can handle TSO[6] w/ ECN in. */
> >  #define VIRTIO_NET_F_HOST_UFO	14	/* Host can handle UFO in. */
> > +#define VIRTIO_NET_F_MRG_RXBUF	15	/* Host can merge receive buffers. */
> >  
> What's the status of the guest side of this feature?

Waiting on Rusty, basically.

As requested, I've changed the ABI so we include the number of merged
buffers in the virtio_net_hdr:

  http://lkml.org/lkml/2008/10/10/204

> >  #define TX_TIMER_INTERVAL 150000 /* 150 us */
> >  
> > +/* Should be the largest MAX_SKB_FRAGS supported. */
> > +#define VIRTIO_NET_MAX_FRAGS	18
> > +
> >   
> 
> This should be advertised by the host to the guest (or vice-versa?).  
> We're embedding Linux-specific magic numbers in a guest-OS-agnostic ABI.
> 
> Preferably, there shouldn't be a limit at all.

Yeah, it's far from pretty. The current ABI basically says "you must
supply >64k receive buffers" whereas this new ABI says "you must supply
at least 18 >4k receive buffers".

We could think about having the host expose the maximum rx packet size
to the guest (and handle migrating to a host with a different max), but
TBH I don't think it would be worth much until we have the prospect of
running on a host with a larger maximum rx packet size.

Requiring the guest to fill the ring with ~64k of buffers isn't onerous;
the Linux guest impl currently re-fills the ring up to the max (e.g. 256
x 4k)

> > @@ -209,7 +220,12 @@ static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
> >      if (virtqueue_pop(n->rx_vq, &elem) == 0)
> >  	return;
> >  
> > -    if (elem.in_num < 1 || elem.in_sg[0].iov_len != sizeof(*hdr)) {
> > +    if (n->mergeable_rx_bufs) {
> > +	if (elem.in_num < 1 || elem.in_sg[0].iov_len < TARGET_PAGE_SIZE) {
> > +	    fprintf(stderr, "virtio-net IOV is irregular\n");
> > +	    exit(1);
> > +	}
> >   
> 
> Again, this is burying details of the current Linux stack into the ABI.  
> The Linux stack may change not to be page oriented, or maybe this won't 
> fit well with how Windows views things.  Can this be made not to depend on 
> the size of the iov elements?

This actually relates to the check in can_receive() - we need to be sure
that we have enough buffers in the ring before we read() from the tapfd.

So, it's not so much about the guest being page oriented, but rather
having a simple way to determine whether there's enough buffers in the
ring - "do we have 18 >4k buffers?" is more preferable than peeking in
the ring and adding up the size of the available buffers

> > +    } else {
> > +	iov_fill(&elem.in_sg[1], elem.in_num - 1,
> > +		 buf + offset, size - offset);
> > +
> > +	/* signal other side */
> > +	virtqueue_push(n->rx_vq, &elem, total);
> > +    }
> > +
> >   
> 
> Can we merge the two sides of the if () so that the only difference is 
> the number of times we go through the loop?

Yeah, good point.

Re-factored version below, which also includes the virtio_net_hdr2
changes.

Cheers,
Mark.

Subject: kvm: qemu: Improve virtio_net recv buffer allocation scheme

Currently, in order to receive large packets, the guest must allocate
max-sized packet buffers and pass them to the host. Each of these
max-sized packets occupy 20 ring entries, which means we can only
transfer a maximum of 12 packets in a single batch with a 256 entry
ring.

When receiving packets from external networks, we only receive MTU
sized packets and so the throughput observed is throttled by the
number of packets the ring can hold.

Implement the VIRTIO_NET_F_MRG_RXBUF feature to let guests know that
we can merge smaller buffers together in order to handle large packets.

This scheme allows us to be efficient in our use of ring entries
while still supporting large packets. Benchmarking using netperf from
an external machine to a guest over a 10Gb/s network shows a 100%
improvement from ~1Gb/s to ~2Gb/s. With a local host->guest benchmark
with GSO disabled on the host side, throughput was seen to increase
from 700Mb/s to 1.7Gb/s.

Based on a patch from Herbert Xu.

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
---
 qemu/hw/virtio-net.c |  133 +++++++++++++++++++++++++++++++++++++++----------
 1 files changed, 106 insertions(+), 27 deletions(-)

diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c
index d6b4457..c082f02 100644
--- a/qemu/hw/virtio-net.c
+++ b/qemu/hw/virtio-net.c
@@ -34,9 +34,13 @@
 #define VIRTIO_NET_F_HOST_TSO6	12	/* Host can handle TSOv6 in. */
 #define VIRTIO_NET_F_HOST_ECN	13	/* Host can handle TSO[6] w/ ECN in. */
 #define VIRTIO_NET_F_HOST_UFO	14	/* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF	15	/* Host can merge receive buffers. */
 
 #define TX_TIMER_INTERVAL 150000 /* 150 us */
 
+/* Should be the largest MAX_SKB_FRAGS supported. */
+#define VIRTIO_NET_MAX_FRAGS	18
+
 /* The config defining mac address (6 bytes) */
 struct virtio_net_config
 {
@@ -61,6 +65,15 @@ struct virtio_net_hdr
     uint16_t csum_offset;
 };
 
+/* This is the version of the header to use when the MRG_RXBUF
+ * feature (or any later feature) has been negotiated. */
+struct virtio_net_hdr2
+{
+    struct virtio_net_hdr hdr;
+    uint8_t num_buffers;   /* Number of merged rx buffers */
+    uint8_t pad[21];       /* Pad to 32 bytes */
+};
+
 typedef struct VirtIONet
 {
     VirtIODevice vdev;
@@ -70,6 +83,8 @@ typedef struct VirtIONet
     VLANClientState *vc;
     QEMUTimer *tx_timer;
     int tx_timer_active;
+    int mergeable_rx_bufs;
+    int use_vnet_hdr2;
 } VirtIONet;
 
 /* TODO
@@ -106,6 +121,7 @@ static uint32_t virtio_net_get_features(VirtIODevice *vdev)
 	features |= (1 << VIRTIO_NET_F_HOST_TSO4);
 	features |= (1 << VIRTIO_NET_F_HOST_TSO6);
 	features |= (1 << VIRTIO_NET_F_HOST_ECN);
+	features |= (1 << VIRTIO_NET_F_MRG_RXBUF);
 	/* Kernel can't actually handle UFO in software currently. */
     }
 
@@ -117,6 +133,10 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
     VirtIONet *n = to_virtio_net(vdev);
     VLANClientState *host = n->vc->vlan->first_client;
 
+    n->mergeable_rx_bufs = !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF));
+
+    n->use_vnet_hdr2 = n->mergeable_rx_bufs;
+
     if (!tap_has_vnet_hdr(host) || !host->set_offload)
 	return;
 
@@ -141,12 +161,15 @@ static int virtio_net_can_receive(void *opaque)
 {
     VirtIONet *n = opaque;
     VirtQueue *vq = n->rx_vq;
+    int min_bufs;
 
     if (vq->vring.avail == NULL ||
 	!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
 	return 0;
 
-    if (vq->vring.avail->idx == vq->last_avail_idx) {
+    min_bufs = n->mergeable_rx_bufs ? VIRTIO_NET_MAX_FRAGS : 1;
+
+    if ((uint16_t)(vq->vring.avail->idx - vq->last_avail_idx) < min_bufs) {
 	vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
 	return 0;
     }
@@ -198,41 +221,86 @@ static int iov_fill(struct iovec *iov, int iovcnt, const void *buf, int count)
     return offset;
 }
 
-static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
+static int receive_header(VirtIONet *n, struct iovec *iov, int iovcnt,
+			  const void *buf, int size, int hdr_len)
 {
-    VirtIONet *n = opaque;
-    VirtQueueElement elem;
-    struct virtio_net_hdr *hdr;
+    struct virtio_net_hdr *hdr = iov[0].iov_base;
     int offset;
-    int total;
-
-    if (virtqueue_pop(n->rx_vq, &elem) == 0)
-	return;
-
-    if (elem.in_num < 1 || elem.in_sg[0].iov_len != sizeof(*hdr)) {
-	fprintf(stderr, "virtio-net header not in first element\n");
-	exit(1);
-    }
 
-    hdr = (void *)elem.in_sg[0].iov_base;
     hdr->flags = 0;
     hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
 
-    offset = 0;
-    total = sizeof(*hdr);
-
     if (tap_has_vnet_hdr(n->vc->vlan->first_client)) {
 	memcpy(hdr, buf, sizeof(*hdr));
-	offset += total;
-        work_around_broken_dhclient(hdr, buf + offset, size - offset);
+	offset = sizeof(*hdr);
+	work_around_broken_dhclient(hdr, buf + offset, size - offset);
+    }
+
+    iov[0].iov_base += hdr_len;
+    iov[0].iov_len  -= hdr_len;
+
+    return offset;
+}
+
+static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
+{
+    VirtIONet *n = opaque;
+    struct virtio_net_hdr2 *hdr2 = NULL;
+    int hdr_len, offset, i;
+
+    hdr_len = n->use_vnet_hdr2 ?
+	sizeof(struct virtio_net_hdr2) : sizeof(struct virtio_net_hdr);
+
+    offset = i = 0;
+
+    while (offset < size) {
+	VirtQueueElement elem;
+	int len, total;
+
+	len = total = 0;
+
+	if ((i != 0 && !n->mergeable_rx_bufs) ||
+	    virtqueue_pop(n->rx_vq, &elem) == 0) {
+	    if (i == 0)
+		return;
+	    fprintf(stderr, "virtio-net truncating packet\n");
+	    exit(1);
+	}
+
+	if (n->mergeable_rx_bufs) {
+	    if (elem.in_num < 1 || elem.in_sg[0].iov_len < TARGET_PAGE_SIZE) {
+		fprintf(stderr, "virtio-net IOV is irregular\n");
+		exit(1);
+	    }
+	} else if (elem.in_num < 1 || elem.in_sg[0].iov_len != hdr_len) {
+	    fprintf(stderr, "virtio-net header not in first element\n");
+	    exit(1);
+	}
+
+	if (i == 0) {
+	    if (n->use_vnet_hdr2)
+		hdr2 = (struct virtio_net_hdr2 *)elem.in_sg[0].iov_base;
+
+	    offset += receive_header(n, &elem.in_sg[0], elem.in_num,
+				     buf + offset, size - offset, hdr_len);
+	    total += hdr_len;
+	}
+
+	/* copy in packet.  ugh */
+	len = iov_fill(&elem.in_sg[0], elem.in_num,
+		       buf + offset, size - offset);
+	total += len;
+
+	/* signal other side */
+	virtqueue_fill(n->rx_vq, &elem, total, i++);
+
+	offset += len;
     }
 
-    /* copy in packet.  ugh */
-    total += iov_fill(&elem.in_sg[1], elem.in_num - 1,
-                      buf + offset, size - offset);
+    if (hdr2)
+	hdr2->num_buffers = i;
 
-    /* signal other side */
-    virtqueue_push(n->rx_vq, &elem, total);
+    virtqueue_flush(n->rx_vq, i);
     virtio_notify(&n->vdev, n->rx_vq);
 }
 
@@ -249,8 +317,12 @@ static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
 	ssize_t len = 0;
 	unsigned int out_num = elem.out_num;
 	struct iovec *out_sg = &elem.out_sg[0];
+	unsigned hdr_len;
+
+	hdr_len = n->use_vnet_hdr2 ?
+	    sizeof(struct virtio_net_hdr2) : sizeof(struct virtio_net_hdr);
 
-	if (out_num < 1 || out_sg->iov_len != sizeof(struct virtio_net_hdr)) {
+	if (out_num < 1 || out_sg->iov_len != hdr_len) {
 	    fprintf(stderr, "virtio-net header not in first element\n");
 	    exit(1);
 	}
@@ -259,7 +331,12 @@ static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
 	if (!has_vnet_hdr) {
 	    out_num--;
 	    out_sg++;
-	    len += sizeof(struct virtio_net_hdr);
+	    len += hdr_len;
+	} else if (n->use_vnet_hdr2) {
+	    /* tapfd expects a virtio_net_hdr */
+	    hdr_len -= sizeof(struct virtio_net_hdr);
+	    out_sg->iov_len -= hdr_len;
+	    len += hdr_len;
 	}
 
 	len += qemu_sendv_packet(n->vc, out_sg, out_num);
@@ -353,6 +430,8 @@ PCIDevice *virtio_net_init(PCIBus *bus, NICInfo *nd, int devfn)
 
     n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n);
     n->tx_timer_active = 0;
+    n->mergeable_rx_bufs = 0;
+    n->use_vnet_hdr2 = 0;
 
     register_savevm("virtio-net", virtio_net_id++, 1,
 		    virtio_net_save, virtio_net_load, n);
-- 
1.6.0.1



^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 5/5] kvm: qemu: Improve virtio_net recv buffer allocation scheme
  2008-10-14 13:44           ` Mark McLoughlin
@ 2008-10-14 15:47             ` Avi Kivity
  2008-11-26 14:50             ` [PATCH 0/5] kvm: qemu: virtio_net: add support for mergeable rx buffers Mark McLoughlin
  1 sibling, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2008-10-14 15:47 UTC (permalink / raw)
  To: Mark McLoughlin; +Cc: kvm, Rusty Russell, Herbert Xu, Anthony Liguori

Mark McLoughlin wrote:
> Yeah, it's far from pretty. The current ABI basically says "you must
> supply >64k receive buffers" whereas this new ABI says "you must supply
> at least 18 >4k receive buffers".
>
> We could think about having the host expose the maximum rx packet size
> to the guest (and handle migrating to a host with a different max), but
> TBH I don't think it would be worth much until we have the prospect of
> running on a host with a larger maximum rx packet size.
>
> Requiring the guest to fill the ring with ~64k of buffers isn't onerous;
> the Linux guest impl currently re-fills the ring up to the max (e.g. 256
> x 4k)
>
>   

We should document the ABI somewhere.  All those hidden rules will make
implementing non-Linux drivers difficult, as well as maintaining
backwards compatibility.


-- 
Do not meddle in the internals of kernels, for they are subtle and quick to panic.


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 0/5] kvm: qemu: virtio_net: add support for mergeable rx buffers
  2008-10-14 13:44           ` Mark McLoughlin
  2008-10-14 15:47             ` Avi Kivity
@ 2008-11-26 14:50             ` Mark McLoughlin
  2008-11-26 14:50               ` [PATCH 1/5] kvm: qemu: virtio: move virtqueue_next_desc() around Mark McLoughlin
  2008-11-27 12:45               ` [PATCH 0/5] kvm: qemu: virtio_net: add support for mergeable rx buffers Avi Kivity
  1 sibling, 2 replies; 16+ messages in thread
From: Mark McLoughlin @ 2008-11-26 14:50 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Rusty Russell, Herbert Xu, Anthony Liguori


Hi,
        The mergeable receive buffers scheme has been in the Linux
virtio_net driver in net-next-2.6 for a while now, so it's probably
safe to assume what's there now is the final iteration of the ABI.

        The following patches implement support for the scheme in
KVM. Only the 5/5 patch contains functional changes.

        Changes since last time:

  + We now peek at how much buffer space is actually available in the
    ring and no longer assume that buffers are at least 4k

  + The new header is now 'struct virtio_net_hdr_mrg_rxbuf' rather
    than 'struct virtio_net_hdr2'

  + The new header no longer is padded to 32 bytes

Cheers,
Mark.


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 1/5] kvm: qemu: virtio: move virtqueue_next_desc() around
  2008-11-26 14:50             ` [PATCH 0/5] kvm: qemu: virtio_net: add support for mergeable rx buffers Mark McLoughlin
@ 2008-11-26 14:50               ` Mark McLoughlin
  2008-11-26 14:50                 ` [PATCH 2/5] kvm: qemu: virtio: introduce virtqueue_fill() and virtqueue_flush() Mark McLoughlin
  2008-11-27 12:45               ` [PATCH 0/5] kvm: qemu: virtio_net: add support for mergeable rx buffers Avi Kivity
  1 sibling, 1 reply; 16+ messages in thread
From: Mark McLoughlin @ 2008-11-26 14:50 UTC (permalink / raw)
  To: Avi Kivity
  Cc: kvm, Rusty Russell, Herbert Xu, Anthony Liguori, Mark McLoughlin

virtio_next_desc() is only used in virtqueue_pop(), so move them
alongside one another.

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
---
 qemu/hw/virtio.c |   30 +++++++++++++++---------------
 1 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/qemu/hw/virtio.c b/qemu/hw/virtio.c
index 8fac354..fe0a120 100644
--- a/qemu/hw/virtio.c
+++ b/qemu/hw/virtio.c
@@ -107,6 +107,21 @@ static void virtqueue_init(VirtQueue *vq, void *p)
     vq->vring.used = (void *)TARGET_PAGE_ALIGN((unsigned long)&vq->vring.avail->ring[vq->vring.num]);
 }
 
+void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
+		    unsigned int len)
+{
+    VRingUsedElem *used;
+
+    /* Get a pointer to the next entry in the used ring. */
+    used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
+    used->id = elem->index;
+    used->len = len;
+    /* Make sure buffer is written before we update index. */
+    wmb();
+    vq->vring.used->idx++;
+    vq->inuse--;
+}
+
 static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
 {
     unsigned int next;
@@ -126,21 +141,6 @@ static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
     return next;
 }
 
-void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
-		    unsigned int len)
-{
-    VRingUsedElem *used;
-
-    /* Get a pointer to the next entry in the used ring. */
-    used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
-    used->id = elem->index;
-    used->len = len;
-    /* Make sure buffer is written before we update index. */
-    wmb();
-    vq->vring.used->idx++;
-    vq->inuse--;
-}
-
 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
 {
     unsigned int i, head;
-- 
1.5.4.3


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 2/5] kvm: qemu: virtio: introduce virtqueue_fill() and virtqueue_flush()
  2008-11-26 14:50               ` [PATCH 1/5] kvm: qemu: virtio: move virtqueue_next_desc() around Mark McLoughlin
@ 2008-11-26 14:50                 ` Mark McLoughlin
  2008-11-26 14:50                   ` [PATCH 3/5] kvm: qemu: virtio: split some helpers out of virtqueue_pop() Mark McLoughlin
  0 siblings, 1 reply; 16+ messages in thread
From: Mark McLoughlin @ 2008-11-26 14:50 UTC (permalink / raw)
  To: Avi Kivity
  Cc: kvm, Rusty Russell, Herbert Xu, Anthony Liguori, Mark McLoughlin

Split virtqueue_push() into two logical steps - adding an element
to the used ring and notifying the other side of added elements.

This is needed because with the mergeable receive buffers scheme we
will add buffers to the used ring as we copy the packet data into them
but we only want to notify the guest of the new buffers once all the
packet buffers are available.

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
---
 qemu/hw/virtio.c |   23 ++++++++++++++++++-----
 qemu/hw/virtio.h |    3 +++
 2 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/qemu/hw/virtio.c b/qemu/hw/virtio.c
index fe0a120..18ad3ed 100644
--- a/qemu/hw/virtio.c
+++ b/qemu/hw/virtio.c
@@ -107,19 +107,32 @@ static void virtqueue_init(VirtQueue *vq, void *p)
     vq->vring.used = (void *)TARGET_PAGE_ALIGN((unsigned long)&vq->vring.avail->ring[vq->vring.num]);
 }
 
-void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
-		    unsigned int len)
+void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+		    unsigned int len, unsigned int idx)
 {
     VRingUsedElem *used;
 
+    idx += vq->vring.used->idx;
+
     /* Get a pointer to the next entry in the used ring. */
-    used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
+    used = &vq->vring.used->ring[idx % vq->vring.num];
     used->id = elem->index;
     used->len = len;
+}
+
+void virtqueue_flush(VirtQueue *vq, unsigned int count)
+{
     /* Make sure buffer is written before we update index. */
     wmb();
-    vq->vring.used->idx++;
-    vq->inuse--;
+    vq->vring.used->idx += count;
+    vq->inuse -= count;
+}
+
+void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
+		    unsigned int len)
+{
+    virtqueue_fill(vq, elem, len, 0);
+    virtqueue_flush(vq, 1);
 }
 
 static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
diff --git a/qemu/hw/virtio.h b/qemu/hw/virtio.h
index 0dcedbf..87a15cc 100644
--- a/qemu/hw/virtio.h
+++ b/qemu/hw/virtio.h
@@ -139,6 +139,9 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
 
 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
 		    unsigned int len);
+void virtqueue_flush(VirtQueue *vq, unsigned int count);
+void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+		    unsigned int len, unsigned int idx);
 
 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
 
-- 
1.5.4.3


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 3/5] kvm: qemu: virtio: split some helpers out of virtqueue_pop()
  2008-11-26 14:50                 ` [PATCH 2/5] kvm: qemu: virtio: introduce virtqueue_fill() and virtqueue_flush() Mark McLoughlin
@ 2008-11-26 14:50                   ` Mark McLoughlin
  2008-11-26 14:50                     ` [PATCH 4/5] kvm: qemu: virtio-net: split iov_fill() out from virtio_net_receive() Mark McLoughlin
  0 siblings, 1 reply; 16+ messages in thread
From: Mark McLoughlin @ 2008-11-26 14:50 UTC (permalink / raw)
  To: Avi Kivity
  Cc: kvm, Rusty Russell, Herbert Xu, Anthony Liguori, Mark McLoughlin

The mergeable receive buffer scheme will introduce a new function
which peeks at how much buffer space is available in the queue.

Split out some helper functions from virtqueue_pop() for that purpose.

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
---
 qemu/hw/virtio.c |   45 +++++++++++++++++++++++++++++----------------
 1 files changed, 29 insertions(+), 16 deletions(-)

diff --git a/qemu/hw/virtio.c b/qemu/hw/virtio.c
index 18ad3ed..42022d4 100644
--- a/qemu/hw/virtio.c
+++ b/qemu/hw/virtio.c
@@ -135,6 +135,33 @@ void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
     virtqueue_flush(vq, 1);
 }
 
+static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
+{
+    uint16_t num_heads = vq->vring.avail->idx - idx;
+
+    /* Check it isn't doing very strange things with descriptor numbers. */
+    if (num_heads > vq->vring.num)
+	errx(1, "Guest moved used index from %u to %u",
+	     idx, vq->vring.avail->idx);
+
+    return num_heads;
+}
+
+static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
+{
+    unsigned int head;
+
+    /* Grab the next descriptor number they're advertising, and increment
+     * the index we've seen. */
+    head = vq->vring.avail->ring[idx % vq->vring.num];
+
+    /* If their number is silly, that's a fatal mistake. */
+    if (head >= vq->vring.num)
+	errx(1, "Guest says index %u is available", head);
+
+    return head;
+}
+
 static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
 {
     unsigned int next;
@@ -158,27 +185,13 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
 {
     unsigned int i, head;
 
-    /* Check it isn't doing very strange things with descriptor numbers. */
-    if ((uint16_t)(vq->vring.avail->idx - vq->last_avail_idx) > vq->vring.num)
-	errx(1, "Guest moved used index from %u to %u",
-	     vq->last_avail_idx, vq->vring.avail->idx);
-
-    /* If there's nothing new since last we looked, return invalid. */
-    if (vq->vring.avail->idx == vq->last_avail_idx)
+    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
 	return 0;
 
-    /* Grab the next descriptor number they're advertising, and increment
-     * the index we've seen. */
-    head = vq->vring.avail->ring[vq->last_avail_idx++ % vq->vring.num];
-
-    /* If their number is silly, that's a fatal mistake. */
-    if (head >= vq->vring.num)
-	errx(1, "Guest says index %u is available", head);
-
     /* When we start there are none of either input nor output. */
     elem->out_num = elem->in_num = 0;
 
-    i = head;
+    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
     do {
 	struct iovec *sg;
 
-- 
1.5.4.3


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 4/5] kvm: qemu: virtio-net: split iov_fill() out from virtio_net_receive()
  2008-11-26 14:50                   ` [PATCH 3/5] kvm: qemu: virtio: split some helpers out of virtqueue_pop() Mark McLoughlin
@ 2008-11-26 14:50                     ` Mark McLoughlin
  2008-11-26 14:50                       ` [PATCH 5/5] kvm: qemu: virtio-net: add a new virtio-net receive buffer scheme Mark McLoughlin
  0 siblings, 1 reply; 16+ messages in thread
From: Mark McLoughlin @ 2008-11-26 14:50 UTC (permalink / raw)
  To: Avi Kivity
  Cc: kvm, Rusty Russell, Herbert Xu, Anthony Liguori, Mark McLoughlin

Simplifies the current code, but more especially, simplifies the
implementation of the mergeable receive buffers scheme.

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
---
 qemu/hw/virtio-net.c |   27 ++++++++++++++++++---------
 1 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c
index bc2ede6..2a52536 100644
--- a/qemu/hw/virtio-net.c
+++ b/qemu/hw/virtio-net.c
@@ -182,12 +182,27 @@ static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
     }
 }
 
+static int iov_fill(struct iovec *iov, int iovcnt, const void *buf, int count)
+{
+    int offset, i;
+
+    offset = i = 0;
+    while (offset < count && i < iovcnt) {
+	int len = MIN(iov[i].iov_len, count - offset);
+	memcpy(iov[i].iov_base, buf + offset, len);
+	offset += len;
+	i++;
+    }
+
+    return offset;
+}
+
 static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
 {
     VirtIONet *n = opaque;
     VirtQueueElement elem;
     struct virtio_net_hdr *hdr;
-    int offset, i;
+    int offset;
     int total;
 
     if (virtqueue_pop(n->rx_vq, &elem) == 0)
@@ -212,14 +227,8 @@ static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
     }
 
     /* copy in packet.  ugh */
-    i = 1;
-    while (offset < size && i < elem.in_num) {
-	int len = MIN(elem.in_sg[i].iov_len, size - offset);
-	memcpy(elem.in_sg[i].iov_base, buf + offset, len);
-	offset += len;
-	total += len;
-	i++;
-    }
+    total += iov_fill(&elem.in_sg[1], elem.in_num - 1,
+                      buf + offset, size - offset);
 
     /* signal other side */
     virtqueue_push(n->rx_vq, &elem, total);
-- 
1.5.4.3


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 5/5] kvm: qemu: virtio-net: add a new virtio-net receive buffer scheme
  2008-11-26 14:50                     ` [PATCH 4/5] kvm: qemu: virtio-net: split iov_fill() out from virtio_net_receive() Mark McLoughlin
@ 2008-11-26 14:50                       ` Mark McLoughlin
  0 siblings, 0 replies; 16+ messages in thread
From: Mark McLoughlin @ 2008-11-26 14:50 UTC (permalink / raw)
  To: Avi Kivity
  Cc: kvm, Rusty Russell, Herbert Xu, Anthony Liguori, Mark McLoughlin

Currently, in order to receive large packets, the guest must allocate
max-sized packet buffers and pass them to the host. Each of these
max-sized packets occupy 20 ring entries, which means we can only
transfer a maximum of 12 packets in a single batch with a 256 entry
ring.

When receiving packets from external networks, we only receive MTU
sized packets and so the throughput observed is throttled by the
number of packets the ring can hold.

Implement the VIRTIO_NET_F_MRG_RXBUF feature to let guests know that
we can merge smaller buffers together in order to handle large packets.

This scheme allows us to be efficient in our use of ring entries
while still supporting large packets. Benchmarking using netperf from
an external machine to a guest over a 10Gb/s network shows a 100%
improvement from ~1Gb/s to ~2Gb/s. With a local host->guest benchmark
with GSO disabled on the host side, throughput was seen to increase
from 700Mb/s to 1.7Gb/s.

Based on a patch from Herbert Xu.

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
---
 qemu/hw/virtio-net.c |  133 +++++++++++++++++++++++++++++++++++++++----------
 qemu/hw/virtio.c     |   32 ++++++++++++
 qemu/hw/virtio.h     |    1 +
 3 files changed, 139 insertions(+), 27 deletions(-)

diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c
index 2a52536..b5d5f9e 100644
--- a/qemu/hw/virtio-net.c
+++ b/qemu/hw/virtio-net.c
@@ -34,9 +34,13 @@
 #define VIRTIO_NET_F_HOST_TSO6	12	/* Host can handle TSOv6 in. */
 #define VIRTIO_NET_F_HOST_ECN	13	/* Host can handle TSO[6] w/ ECN in. */
 #define VIRTIO_NET_F_HOST_UFO	14	/* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF	15	/* Host can merge receive buffers. */
 
 #define TX_TIMER_INTERVAL 150000 /* 150 us */
 
+/* Maximum packet size we can receive from tap device: header + 64k */
+#define VIRTIO_NET_MAX_BUFSIZE	(sizeof(struct virtio_net_hdr) + (64 << 10))
+
 /* The config defining mac address (6 bytes) */
 struct virtio_net_config
 {
@@ -61,6 +65,14 @@ struct virtio_net_hdr
     uint16_t csum_offset;
 };
 
+/* This is the version of the header to use when the MRG_RXBUF
+ * feature has been negotiated. */
+struct virtio_net_hdr_mrg_rxbuf
+{
+    struct virtio_net_hdr hdr;
+    uint16_t num_buffers;   /* Number of merged rx buffers */
+};
+
 typedef struct VirtIONet
 {
     VirtIODevice vdev;
@@ -70,6 +82,7 @@ typedef struct VirtIONet
     VLANClientState *vc;
     QEMUTimer *tx_timer;
     int tx_timer_active;
+    int mergeable_rx_bufs;
 } VirtIONet;
 
 /* TODO
@@ -106,6 +119,7 @@ static uint32_t virtio_net_get_features(VirtIODevice *vdev)
 	features |= (1 << VIRTIO_NET_F_HOST_TSO4);
 	features |= (1 << VIRTIO_NET_F_HOST_TSO6);
 	features |= (1 << VIRTIO_NET_F_HOST_ECN);
+	features |= (1 << VIRTIO_NET_F_MRG_RXBUF);
 	/* Kernel can't actually handle UFO in software currently. */
     }
 
@@ -117,6 +131,8 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
     VirtIONet *n = to_virtio_net(vdev);
     VLANClientState *host = n->vc->vlan->first_client;
 
+    n->mergeable_rx_bufs = !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF));
+
     if (!tap_has_vnet_hdr(host) || !host->set_offload)
 	return;
 
@@ -145,7 +161,9 @@ static int virtio_net_can_receive(void *opaque)
 	!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
 	return 0;
 
-    if (n->rx_vq->vring.avail->idx == n->rx_vq->last_avail_idx) {
+    if (n->rx_vq->vring.avail->idx == n->rx_vq->last_avail_idx ||
+	(n->mergeable_rx_bufs &&
+	 !virtqueue_avail_bytes(n->rx_vq, VIRTIO_NET_MAX_BUFSIZE, 0))) {
 	n->rx_vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
 	return 0;
     }
@@ -197,41 +215,90 @@ static int iov_fill(struct iovec *iov, int iovcnt, const void *buf, int count)
     return offset;
 }
 
-static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
+static int receive_header(VirtIONet *n, struct iovec *iov, int iovcnt,
+			  const void *buf, int size, int hdr_len)
 {
-    VirtIONet *n = opaque;
-    VirtQueueElement elem;
-    struct virtio_net_hdr *hdr;
+    struct virtio_net_hdr *hdr = iov[0].iov_base;
     int offset;
-    int total;
-
-    if (virtqueue_pop(n->rx_vq, &elem) == 0)
-	return;
-
-    if (elem.in_num < 1 || elem.in_sg[0].iov_len != sizeof(*hdr)) {
-	fprintf(stderr, "virtio-net header not in first element\n");
-	exit(1);
-    }
 
-    hdr = (void *)elem.in_sg[0].iov_base;
     hdr->flags = 0;
     hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
 
-    offset = 0;
-    total = sizeof(*hdr);
-
     if (tap_has_vnet_hdr(n->vc->vlan->first_client)) {
 	memcpy(hdr, buf, sizeof(*hdr));
-	offset += total;
-        work_around_broken_dhclient(hdr, buf + offset, size - offset);
+	offset = sizeof(*hdr);
+	work_around_broken_dhclient(hdr, buf + offset, size - offset);
     }
 
-    /* copy in packet.  ugh */
-    total += iov_fill(&elem.in_sg[1], elem.in_num - 1,
-                      buf + offset, size - offset);
+    /* We only ever receive a struct virtio_net_hdr from the tapfd,
+     * but we may be passing along a larger header to the guest.
+     */
+    iov[0].iov_base += hdr_len;
+    iov[0].iov_len  -= hdr_len;
+
+    return offset;
+}
+
+static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
+{
+    VirtIONet *n = opaque;
+    struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL;
+    int hdr_len, offset, i;
+
+    /* hdr_len refers to the header we supply to the guest */
+    hdr_len = n->mergeable_rx_bufs ?
+	sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);
+
+    offset = i = 0;
 
-    /* signal other side */
-    virtqueue_push(n->rx_vq, &elem, total);
+    while (offset < size) {
+	VirtQueueElement elem;
+	int len, total;
+
+	len = total = 0;
+
+	if ((i != 0 && !n->mergeable_rx_bufs) ||
+	    virtqueue_pop(n->rx_vq, &elem) == 0) {
+	    if (i == 0)
+		return;
+	    fprintf(stderr, "virtio-net truncating packet\n");
+	    exit(1);
+	}
+
+	if (elem.in_num < 1) {
+	    fprintf(stderr, "virtio-net receive queue contains no in buffers\n");
+	    exit(1);
+	}
+
+	if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != hdr_len) {
+	    fprintf(stderr, "virtio-net header not in first element\n");
+	    exit(1);
+	}
+
+	if (i == 0) {
+	    if (n->mergeable_rx_bufs)
+		mhdr = (struct virtio_net_hdr_mrg_rxbuf *)elem.in_sg[0].iov_base;
+
+	    offset += receive_header(n, &elem.in_sg[0], elem.in_num,
+				     buf + offset, size - offset, hdr_len);
+	    total += hdr_len;
+	}
+
+	/* copy in packet.  ugh */
+	len = iov_fill(&elem.in_sg[0], elem.in_num,
+		       buf + offset, size - offset);
+	total += len;
+
+	/* signal other side */
+	virtqueue_fill(n->rx_vq, &elem, total, i++);
+
+	offset += len;
+    }
+
+    if (mhdr)
+	mhdr->num_buffers = i;
+
+    virtqueue_flush(n->rx_vq, i);
     virtio_notify(&n->vdev, n->rx_vq);
 }
 
@@ -248,8 +315,14 @@ static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
 	ssize_t len = 0;
 	unsigned int out_num = elem.out_num;
 	struct iovec *out_sg = &elem.out_sg[0];
+	unsigned hdr_len;
+
+        /* hdr_len refers to the header received from the guest */
+	hdr_len = n->mergeable_rx_bufs ?
+	    sizeof(struct virtio_net_hdr_mrg_rxbuf) :
+	    sizeof(struct virtio_net_hdr);
 
-	if (out_num < 1 || out_sg->iov_len != sizeof(struct virtio_net_hdr)) {
+	if (out_num < 1 || out_sg->iov_len != hdr_len) {
 	    fprintf(stderr, "virtio-net header not in first element\n");
 	    exit(1);
 	}
@@ -258,7 +331,12 @@ static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
 	if (!has_vnet_hdr) {
 	    out_num--;
 	    out_sg++;
-	    len += sizeof(struct virtio_net_hdr);
+	    len += hdr_len;
+	} else if (n->mergeable_rx_bufs) {
+	    /* tapfd expects a struct virtio_net_hdr */
+	    hdr_len -= sizeof(struct virtio_net_hdr);
+	    out_sg->iov_len -= hdr_len;
+	    len += hdr_len;
 	}
 
 	len += qemu_sendv_packet(n->vc, out_sg, out_num);
@@ -352,6 +430,7 @@ PCIDevice *virtio_net_init(PCIBus *bus, NICInfo *nd, int devfn)
 
     n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n);
     n->tx_timer_active = 0;
+    n->mergeable_rx_bufs = 0;
 
     register_savevm("virtio-net", virtio_net_id++, 1,
 		    virtio_net_save, virtio_net_load, n);
diff --git a/qemu/hw/virtio.c b/qemu/hw/virtio.c
index 42022d4..303f5e7 100644
--- a/qemu/hw/virtio.c
+++ b/qemu/hw/virtio.c
@@ -181,6 +181,38 @@ static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
     return next;
 }
 
+int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
+{
+    unsigned int idx;
+    int num_bufs, in_total, out_total;
+
+    idx = vq->last_avail_idx;
+
+    num_bufs = in_total = out_total = 0;
+    while (virtqueue_num_heads(vq, idx)) {
+	int i;
+
+	i = virtqueue_get_head(vq, idx++);
+	do {
+	    /* If we've got too many, that implies a descriptor loop. */
+	    if (++num_bufs > vq->vring.num)
+		errx(1, "Looped descriptor");
+
+	    if (vq->vring.desc[i].flags & VRING_DESC_F_WRITE) {
+		if (in_bytes > 0 &&
+		    (in_total += vq->vring.desc[i].len) >= in_bytes)
+		    return 1;
+	    } else {
+		if (out_bytes > 0 &&
+		    (out_total += vq->vring.desc[i].len) >= out_bytes)
+		    return 1;
+	    }
+	} while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);
+    }
+
+    return 0;
+}
+
 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
 {
     unsigned int i, head;
diff --git a/qemu/hw/virtio.h b/qemu/hw/virtio.h
index 87a15cc..15f020b 100644
--- a/qemu/hw/virtio.h
+++ b/qemu/hw/virtio.h
@@ -144,6 +144,7 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
 		    unsigned int len, unsigned int idx);
 
 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
+int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes);
 
 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
 
-- 
1.5.4.3


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 0/5] kvm: qemu: virtio_net: add support for mergeable rx buffers
  2008-11-26 14:50             ` [PATCH 0/5] kvm: qemu: virtio_net: add support for mergeable rx buffers Mark McLoughlin
  2008-11-26 14:50               ` [PATCH 1/5] kvm: qemu: virtio: move virtqueue_next_desc() around Mark McLoughlin
@ 2008-11-27 12:45               ` Avi Kivity
  2008-11-27 13:32                 ` Mark McLoughlin
  1 sibling, 1 reply; 16+ messages in thread
From: Avi Kivity @ 2008-11-27 12:45 UTC (permalink / raw)
  To: Mark McLoughlin; +Cc: kvm, Rusty Russell, Herbert Xu, Anthony Liguori

Mark McLoughlin wrote:
> Hi,
>         The mergeable receive buffers scheme has been in the Linux
> virtio_net driver in net-next-2.6 for a while now, so it's probably
> safe to assume what's there now is the final iteration of the ABI.
>
>         The following patches implement support for the scheme in
> KVM. Only the 5/5 patch contains functional changes.
>   

Applied all, thanks.

>         Changes since last time:
>
>   + We now peek at how much buffer space is actually available in the
>     ring and no longer assume that buffers are at least 4k
>
>   + The new header is now 'struct virtio_net_hdr_mrg_rxbuf' rather
>     than 'struct virtio_net_hdr2'
>   

Does the updated driver support both?

>   + The new header no longer is padded to 32 bytes
>   

Pity.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 0/5] kvm: qemu: virtio_net: add support for mergeable rx buffers
  2008-11-27 12:45               ` [PATCH 0/5] kvm: qemu: virtio_net: add support for mergeable rx buffers Avi Kivity
@ 2008-11-27 13:32                 ` Mark McLoughlin
  0 siblings, 0 replies; 16+ messages in thread
From: Mark McLoughlin @ 2008-11-27 13:32 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, Rusty Russell, Herbert Xu, Anthony Liguori

On Thu, 2008-11-27 at 14:45 +0200, Avi Kivity wrote:
> Mark McLoughlin wrote:
> > Hi,
> >         The mergeable receive buffers scheme has been in the Linux
> > virtio_net driver in net-next-2.6 for a while now, so it's probably
> > safe to assume what's there now is the final iteration of the ABI.
> >
> >         The following patches implement support for the scheme in
> > KVM. Only the 5/5 patch contains functional changes.
> >   
> 
> Applied all, thanks.

Great.

> >         Changes since last time:
> >
> >   + We now peek at how much buffer space is actually available in the
> >     ring and no longer assume that buffers are at least 4k
> >
> >   + The new header is now 'struct virtio_net_hdr_mrg_rxbuf' rather
> >     than 'struct virtio_net_hdr2'
> >   
> 
> Does the updated driver support both?

Nope, just the 'struct virtio_net_hdr_mrg_rxbuf'

Cheers,
Mark.


^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2008-11-27 13:33 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-10-08 19:35 [PATCH 1/5] kvm: qemu: Move virtqueue_next_desc() around Mark McLoughlin
2008-10-08 19:35 ` [PATCH 2/5] kvm: qemu: Introduce virtqueue_fill() and virtqueue_flush() Mark McLoughlin
2008-10-08 19:35   ` [PATCH 3/5] kvm: qemu: Simplify virtio_net_can_receive() a little Mark McLoughlin
2008-10-08 19:35     ` [PATCH 4/5] kvm: qemu: Split iov_fill() out from virtio_net_receive() Mark McLoughlin
2008-10-08 19:35       ` [PATCH 5/5] kvm: qemu: Improve virtio_net recv buffer allocation scheme Mark McLoughlin
2008-10-12 10:00         ` Avi Kivity
2008-10-14 13:44           ` Mark McLoughlin
2008-10-14 15:47             ` Avi Kivity
2008-11-26 14:50             ` [PATCH 0/5] kvm: qemu: virtio_net: add support for mergeable rx buffers Mark McLoughlin
2008-11-26 14:50               ` [PATCH 1/5] kvm: qemu: virtio: move virtqueue_next_desc() around Mark McLoughlin
2008-11-26 14:50                 ` [PATCH 2/5] kvm: qemu: virtio: introduce virtqueue_fill() and virtqueue_flush() Mark McLoughlin
2008-11-26 14:50                   ` [PATCH 3/5] kvm: qemu: virtio: split some helpers out of virtqueue_pop() Mark McLoughlin
2008-11-26 14:50                     ` [PATCH 4/5] kvm: qemu: virtio-net: split iov_fill() out from virtio_net_receive() Mark McLoughlin
2008-11-26 14:50                       ` [PATCH 5/5] kvm: qemu: virtio-net: add a new virtio-net receive buffer scheme Mark McLoughlin
2008-11-27 12:45               ` [PATCH 0/5] kvm: qemu: virtio_net: add support for mergeable rx buffers Avi Kivity
2008-11-27 13:32                 ` Mark McLoughlin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).