public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 1/4] kvm tools: Add ioeventfd support
@ 2011-05-27 16:18 Sasha Levin
  2011-05-27 16:18 ` [PATCH v2 2/4] kvm tools: Use ioeventfd in virtio-blk Sasha Levin
                   ` (3 more replies)
  0 siblings, 4 replies; 13+ messages in thread
From: Sasha Levin @ 2011-05-27 16:18 UTC (permalink / raw)
  To: penberg
  Cc: john, kvm, mingo, asias.hejun, gorcunov, prasadjoshi124,
	Sasha Levin

ioeventfd is a mechanism provided by KVM to receive notifications about
reads and writes to PIO and MMIO areas within the guest.

Such notifications are useful if all we need to know is that
a specific area of the memory has been changed, and we don't need
a heavyweight exit to happen.

The implementation uses epoll to scale to a large number of ioeventfds.

Benchmarks were run on a separate (non-boot) 1GB virtio-blk device, formatted
as ext4, using bonnie++.

cmd line:
# bonnie++ -d temp/ -c 2 -s 768 -u 0

Before:
Version  1.96       ------Sequential Output------ --Sequential Input- --Random-
Concurrency   2     -Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--
Machine        Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP  /sec %CP
tux            768M   498  99 381127  74 269712  48  2871  99 717109  50 +++++ +++
Latency             18368us   31063us   21281us    3017us    6875us     251ms
Version  1.96       ------Sequential Create------ --------Random Create--------
tux                 -Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--
              files  /sec %CP  /sec %CP  /sec %CP  /sec %CP  /sec %CP  /sec %CP
                 16 +++++ +++ +++++ +++ +++++ +++ +++++ +++ +++++ +++ +++++ +++
Latency               148us     588us    2792us    1547us    1543us     218us

After:
Version  1.96       ------Sequential Output------ --Sequential Input- --Random-
Concurrency   2     -Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--
Machine        Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP  /sec %CP
tux            768M   499  99 459779  73 350689  54  2997  99 860395  58 +++++ +++
Latency             17194us   14619us   26358us    4055us    7890us   44122us
Version  1.96       ------Sequential Create------ --------Random Create--------
tux                 -Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--
              files  /sec %CP  /sec %CP  /sec %CP  /sec %CP  /sec %CP  /sec %CP
                 16 +++++ +++ +++++ +++ +++++ +++ +++++ +++ +++++ +++ +++++ +++
Latency               135us     567us    2564us     134us    1500us     231us

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 tools/kvm/Makefile                |    1 +
 tools/kvm/include/kvm/ioeventfd.h |   27 ++++++++
 tools/kvm/ioeventfd.c             |  128 +++++++++++++++++++++++++++++++++++++
 tools/kvm/kvm-run.c               |    4 +
 4 files changed, 160 insertions(+), 0 deletions(-)
 create mode 100644 tools/kvm/include/kvm/ioeventfd.h
 create mode 100644 tools/kvm/ioeventfd.c

diff --git a/tools/kvm/Makefile b/tools/kvm/Makefile
index 2ebc86c..e7ceb5c 100644
--- a/tools/kvm/Makefile
+++ b/tools/kvm/Makefile
@@ -48,6 +48,7 @@ OBJS    += irq.o
 OBJS    += rbtree.o
 OBJS    += util/rbtree-interval.o
 OBJS    += virtio/9p.o
+OBJS    += ioeventfd.o
 
 
 FLAGS_BFD=$(CFLAGS) -lbfd
diff --git a/tools/kvm/include/kvm/ioeventfd.h b/tools/kvm/include/kvm/ioeventfd.h
new file mode 100644
index 0000000..df01750
--- /dev/null
+++ b/tools/kvm/include/kvm/ioeventfd.h
@@ -0,0 +1,27 @@
+#ifndef KVM__IOEVENTFD_H
+#define KVM__IOEVENTFD_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <sys/eventfd.h>
+
+struct kvm;
+
+struct ioevent {
+	u64			io_addr;
+	u8			io_len;
+	void			(*fn)(struct kvm *kvm, void *ptr);
+	struct kvm		*fn_kvm;
+	void			*fn_ptr;
+	int			fd;
+	u64			datamatch;
+
+	struct list_head	list;
+};
+
+void ioeventfd__init(void);
+void ioeventfd__start(void);
+void ioeventfd__add_event(struct ioevent *ioevent);
+void ioeventfd__del_event(u64 addr, u64 datamatch);
+
+#endif
diff --git a/tools/kvm/ioeventfd.c b/tools/kvm/ioeventfd.c
new file mode 100644
index 0000000..3a240e4
--- /dev/null
+++ b/tools/kvm/ioeventfd.c
@@ -0,0 +1,128 @@
+#include <sys/epoll.h>
+#include <sys/ioctl.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <signal.h>
+
+#include <linux/kernel.h>
+#include <linux/kvm.h>
+#include <linux/types.h>
+
+#include "kvm/ioeventfd.h"
+#include "kvm/kvm.h"
+#include "kvm/util.h"
+
+#define IOEVENTFD_MAX_EVENTS	20
+
+static struct	epoll_event events[IOEVENTFD_MAX_EVENTS];
+static int	epoll_fd;
+static LIST_HEAD(used_ioevents);
+
+void ioeventfd__init(void)
+{
+	epoll_fd = epoll_create(IOEVENTFD_MAX_EVENTS);
+	if (epoll_fd < 0)
+		die("Failed creating epoll fd");
+}
+
+void ioeventfd__add_event(struct ioevent *ioevent)
+{
+	struct kvm_ioeventfd kvm_ioevent;
+	struct epoll_event epoll_event;
+	struct ioevent *new_ioevent;
+	int event;
+
+	new_ioevent = malloc(sizeof(*new_ioevent));
+	if (new_ioevent == NULL)
+		die("Failed allocating memory for new ioevent");
+
+	*new_ioevent = *ioevent;
+	event = new_ioevent->fd;
+
+	kvm_ioevent = (struct kvm_ioeventfd) {
+		.addr			= ioevent->io_addr,
+		.len			= ioevent->io_len,
+		.datamatch		= ioevent->datamatch,
+		.fd			= event,
+		.flags			= KVM_IOEVENTFD_FLAG_PIO | KVM_IOEVENTFD_FLAG_DATAMATCH,
+	};
+
+	if (ioctl(ioevent->fn_kvm->vm_fd, KVM_IOEVENTFD, &kvm_ioevent) != 0)
+		die("Failed creating new ioeventfd");
+
+	epoll_event = (struct epoll_event) {
+		.events			= EPOLLIN,
+		.data.ptr		= new_ioevent,
+	};
+
+	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event, &epoll_event) != 0)
+		die("Failed assigning new event to the epoll fd");
+
+	list_add_tail(&new_ioevent->list, &used_ioevents);
+}
+
+void ioeventfd__del_event(u64 addr, u64 datamatch)
+{
+	struct kvm_ioeventfd kvm_ioevent;
+	struct ioevent *ioevent;
+	u8 found = 0;
+
+	list_for_each_entry(ioevent, &used_ioevents, list) {
+		if (ioevent->io_addr == addr) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found == 0 || ioevent == NULL)
+		return;
+
+	kvm_ioevent = (struct kvm_ioeventfd) {
+		.addr			= ioevent->io_addr,
+		.len			= ioevent->io_len,
+		.datamatch		= ioevent->datamatch,
+		.flags			= KVM_IOEVENTFD_FLAG_PIO
+					| KVM_IOEVENTFD_FLAG_DEASSIGN
+					| KVM_IOEVENTFD_FLAG_DATAMATCH,
+	};
+
+	ioctl(ioevent->fn_kvm->vm_fd, KVM_IOEVENTFD, &kvm_ioevent);
+
+	epoll_ctl(epoll_fd, EPOLL_CTL_DEL, ioevent->fd, NULL);
+
+	list_del(&ioevent->list);
+
+	close(ioevent->fd);
+	free(ioevent);
+}
+
+static void *ioeventfd__thread(void *param)
+{
+	for (;;) {
+		int nfds, i;
+
+		nfds = epoll_wait(epoll_fd, events, IOEVENTFD_MAX_EVENTS, -1);
+		for (i = 0; i < nfds; i++) {
+			u64 tmp;
+			struct ioevent *ioevent;
+
+			ioevent = events[i].data.ptr;
+
+			if (read(ioevent->fd, &tmp, sizeof(tmp)) < 0)
+				die("Failed reading event");
+
+			ioevent->fn(ioevent->fn_kvm, ioevent->fn_ptr);
+		}
+	}
+
+	return NULL;
+}
+
+void ioeventfd__start(void)
+{
+	pthread_t thread;
+
+	if (pthread_create(&thread, NULL, ioeventfd__thread, NULL) != 0)
+		die("Failed starting ioeventfd thread");
+}
diff --git a/tools/kvm/kvm-run.c b/tools/kvm/kvm-run.c
index f384ddd..48b8e70 100644
--- a/tools/kvm/kvm-run.c
+++ b/tools/kvm/kvm-run.c
@@ -29,6 +29,7 @@
 #include <kvm/symbol.h>
 #include <kvm/virtio-9p.h>
 #include <kvm/vesa.h>
+#include <kvm/ioeventfd.h>
 
 /* header files for gitish interface  */
 #include <kvm/kvm-run.h>
@@ -505,6 +506,8 @@ int kvm_cmd_run(int argc, const char **argv, const char *prefix)
 
 	kvm = kvm__init(kvm_dev, ram_size);
 
+	ioeventfd__init();
+
 	max_cpus = kvm__max_cpus(kvm);
 
 	if (nrcpus > max_cpus) {
@@ -612,6 +615,7 @@ int kvm_cmd_run(int argc, const char **argv, const char *prefix)
 		vesa__init(kvm);
 
 	thread_pool__init(nr_online_cpus);
+	ioeventfd__start();
 
 	for (i = 0; i < nrcpus; i++) {
 		if (pthread_create(&kvm_cpus[i]->thread, NULL, kvm_cpu_thread, kvm_cpus[i]) != 0)
-- 
1.7.5.rc3


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH v2 2/4] kvm tools: Use ioeventfd in virtio-blk
  2011-05-27 16:18 [PATCH v2 1/4] kvm tools: Add ioeventfd support Sasha Levin
@ 2011-05-27 16:18 ` Sasha Levin
  2011-05-27 16:18 ` [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net Sasha Levin
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 13+ messages in thread
From: Sasha Levin @ 2011-05-27 16:18 UTC (permalink / raw)
  To: penberg
  Cc: john, kvm, mingo, asias.hejun, gorcunov, prasadjoshi124,
	Sasha Levin

Use ioeventfds to receive notifications of IO events in virtio-blk.
Doing so prevents an exit every time we read/write from/to the
virtio disk.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 tools/kvm/virtio/blk.c |   26 +++++++++++++++++++++++++-
 1 files changed, 25 insertions(+), 1 deletions(-)

diff --git a/tools/kvm/virtio/blk.c b/tools/kvm/virtio/blk.c
index a8f9d8c..8d54f5a 100644
--- a/tools/kvm/virtio/blk.c
+++ b/tools/kvm/virtio/blk.c
@@ -10,6 +10,7 @@
 #include "kvm/kvm.h"
 #include "kvm/pci.h"
 #include "kvm/threadpool.h"
+#include "kvm/ioeventfd.h"
 
 #include <linux/virtio_ring.h>
 #include <linux/virtio_blk.h>
@@ -243,11 +244,19 @@ static struct ioport_operations virtio_blk_io_ops = {
 	.io_out	= virtio_blk_pci_io_out,
 };
 
+static void ioevent_callback(struct kvm *kvm, void *param)
+{
+	struct blk_dev_job *job = param;
+
+	thread_pool__do_job(job->job_id);
+}
+
 void virtio_blk__init(struct kvm *kvm, struct disk_image *disk)
 {
 	u16 blk_dev_base_addr;
-	u8 dev, pin, line;
+	u8 dev, pin, line, i;
 	struct blk_dev *bdev;
+	struct ioevent ioevent;
 
 	if (!disk)
 		return;
@@ -293,6 +302,20 @@ void virtio_blk__init(struct kvm *kvm, struct disk_image *disk)
 	bdev->pci_hdr.irq_line	= line;
 
 	pci__register(&bdev->pci_hdr, dev);
+
+	for (i = 0; i < NUM_VIRT_QUEUES; i++) {
+		ioevent = (struct ioevent) {
+			.io_addr		= blk_dev_base_addr + VIRTIO_PCI_QUEUE_NOTIFY,
+			.io_len			= sizeof(u16),
+			.fn			= ioevent_callback,
+			.datamatch		= i,
+			.fn_ptr			= &bdev->jobs[i],
+			.fn_kvm			= kvm,
+			.fd			= eventfd(0, 0),
+		};
+
+		ioeventfd__add_event(&ioevent);
+	}
 }
 
 void virtio_blk__init_all(struct kvm *kvm)
@@ -309,6 +332,7 @@ void virtio_blk__delete_all(struct kvm *kvm)
 		struct blk_dev *bdev;
 
 		bdev = list_first_entry(&bdevs, struct blk_dev, list);
+		ioeventfd__del_event(bdev->base_addr + VIRTIO_PCI_QUEUE_NOTIFY, 0);
 		list_del(&bdev->list);
 		free(bdev);
 	}
-- 
1.7.5.rc3


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net
  2011-05-27 16:18 [PATCH v2 1/4] kvm tools: Add ioeventfd support Sasha Levin
  2011-05-27 16:18 ` [PATCH v2 2/4] kvm tools: Use ioeventfd in virtio-blk Sasha Levin
@ 2011-05-27 16:18 ` Sasha Levin
  2011-05-29 12:51   ` Ingo Molnar
  2011-05-30 23:53   ` Asias He
  2011-05-27 16:18 ` [PATCH v2 4/4] kvm tools: Use ioeventfd in virtio-rng Sasha Levin
  2011-05-27 16:52 ` [PATCH v2 1/4] kvm tools: Add ioeventfd support Ingo Molnar
  3 siblings, 2 replies; 13+ messages in thread
From: Sasha Levin @ 2011-05-27 16:18 UTC (permalink / raw)
  To: penberg
  Cc: john, kvm, mingo, asias.hejun, gorcunov, prasadjoshi124,
	Sasha Levin

Use ioeventfds to receive notifications of IO events in virtio-net.
Doing so prevents an exit every time we receive/send a packet.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 tools/kvm/virtio/net.c |   22 ++++++++++++++++++++++
 1 files changed, 22 insertions(+), 0 deletions(-)

diff --git a/tools/kvm/virtio/net.c b/tools/kvm/virtio/net.c
index 3064da6..5c39c43 100644
--- a/tools/kvm/virtio/net.c
+++ b/tools/kvm/virtio/net.c
@@ -8,6 +8,7 @@
 #include "kvm/kvm.h"
 #include "kvm/pci.h"
 #include "kvm/irq.h"
+#include "kvm/ioeventfd.h"
 
 #include <linux/virtio_net.h>
 #include <linux/if_tun.h>
@@ -280,6 +281,11 @@ static bool virtio_net_pci_io_out(struct ioport *ioport, struct kvm *kvm, u16 po
 	return ret;
 }
 
+static void ioevent_callback(struct kvm *kvm, void *param)
+{
+	virtio_net_handle_callback(kvm, (u64)param);
+}
+
 static struct ioport_operations virtio_net_io_ops = {
 	.io_in	= virtio_net_pci_io_in,
 	.io_out	= virtio_net_pci_io_out,
@@ -388,6 +394,8 @@ void virtio_net__init(const struct virtio_net_parameters *params)
 	if (virtio_net__tap_init(params)) {
 		u8 dev, line, pin;
 		u16 net_base_addr;
+		u64 i;
+		struct ioevent ioevent;
 
 		if (irq__register_device(VIRTIO_ID_NET, &dev, &pin, &line) < 0)
 			return;
@@ -401,5 +409,19 @@ void virtio_net__init(const struct virtio_net_parameters *params)
 		pci__register(&pci_header, dev);
 
 		virtio_net__io_thread_init(params->kvm);
+
+		for (i = 0; i < VIRTIO_NET_NUM_QUEUES; i++) {
+			ioevent = (struct ioevent) {
+				.io_addr		= net_base_addr + VIRTIO_PCI_QUEUE_NOTIFY,
+				.io_len			= sizeof(u16),
+				.fn			= ioevent_callback,
+				.datamatch		= i,
+				.fn_ptr			= (void *)i,
+				.fn_kvm			= params->kvm,
+				.fd			= eventfd(0, 0),
+			};
+
+			ioeventfd__add_event(&ioevent);
+		}
 	}
 }
-- 
1.7.5.rc3


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH v2 4/4] kvm tools: Use ioeventfd in virtio-rng
  2011-05-27 16:18 [PATCH v2 1/4] kvm tools: Add ioeventfd support Sasha Levin
  2011-05-27 16:18 ` [PATCH v2 2/4] kvm tools: Use ioeventfd in virtio-blk Sasha Levin
  2011-05-27 16:18 ` [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net Sasha Levin
@ 2011-05-27 16:18 ` Sasha Levin
  2011-05-27 16:52 ` [PATCH v2 1/4] kvm tools: Add ioeventfd support Ingo Molnar
  3 siblings, 0 replies; 13+ messages in thread
From: Sasha Levin @ 2011-05-27 16:18 UTC (permalink / raw)
  To: penberg
  Cc: john, kvm, mingo, asias.hejun, gorcunov, prasadjoshi124,
	Sasha Levin

Use ioeventfds to receive notifications of IO events in virtio-rng.
Doing so prevents an exit every time we need to supply randomness
to the guest.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 tools/kvm/virtio/rng.c |   27 ++++++++++++++++++++++++++-
 1 files changed, 26 insertions(+), 1 deletions(-)

diff --git a/tools/kvm/virtio/rng.c b/tools/kvm/virtio/rng.c
index 1a3bda3..d5d7795c 100644
--- a/tools/kvm/virtio/rng.c
+++ b/tools/kvm/virtio/rng.c
@@ -10,6 +10,7 @@
 #include "kvm/pci.h"
 #include "kvm/threadpool.h"
 #include "kvm/irq.h"
+#include "kvm/ioeventfd.h"
 
 #include <linux/virtio_ring.h>
 #include <linux/virtio_rng.h>
@@ -154,6 +155,7 @@ static bool virtio_rng_pci_io_out(struct ioport *ioport, struct kvm *kvm, u16 po
 		break;
 	case VIRTIO_PCI_QUEUE_NOTIFY: {
 		u16 queue_index;
+		printf("regular exit");
 		queue_index		= ioport__read16(data);
 		thread_pool__do_job(rdev->jobs[queue_index].job_id);
 		break;
@@ -177,11 +179,19 @@ static struct ioport_operations virtio_rng_io_ops = {
 	.io_out				= virtio_rng_pci_io_out,
 };
 
+static void ioevent_callback(struct kvm *kvm, void *param)
+{
+	struct rng_dev_job *job = param;
+
+	thread_pool__do_job(job->job_id);
+}
+
 void virtio_rng__init(struct kvm *kvm)
 {
-	u8 pin, line, dev;
+	u8 pin, line, dev, i;
 	u16 rdev_base_addr;
 	struct rng_dev *rdev;
+	struct ioevent ioevent;
 
 	rdev = malloc(sizeof(*rdev));
 	if (rdev == NULL)
@@ -213,6 +223,20 @@ void virtio_rng__init(struct kvm *kvm)
 	pci__register(&rdev->pci_hdr, dev);
 
 	list_add_tail(&rdev->list, &rdevs);
+
+	for (i = 0; i < NUM_VIRT_QUEUES; i++) {
+		ioevent = (struct ioevent) {
+			.io_addr		= rdev_base_addr + VIRTIO_PCI_QUEUE_NOTIFY,
+			.io_len			= sizeof(u16),
+			.fn			= ioevent_callback,
+			.fn_ptr			= &rdev->jobs[i],
+			.datamatch		= i,
+			.fn_kvm			= kvm,
+			.fd			= eventfd(0, 0),
+		};
+
+		ioeventfd__add_event(&ioevent);
+	}
 }
 
 void virtio_rng__delete_all(struct kvm *kvm)
@@ -222,6 +246,7 @@ void virtio_rng__delete_all(struct kvm *kvm)
 
 		rdev = list_first_entry(&rdevs, struct rng_dev, list);
 		list_del(&rdev->list);
+		ioeventfd__del_event(rdev->base_addr + VIRTIO_PCI_QUEUE_NOTIFY, 0);
 		free(rdev);
 	}
 }
-- 
1.7.5.rc3


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 1/4] kvm tools: Add ioeventfd support
  2011-05-27 16:18 [PATCH v2 1/4] kvm tools: Add ioeventfd support Sasha Levin
                   ` (2 preceding siblings ...)
  2011-05-27 16:18 ` [PATCH v2 4/4] kvm tools: Use ioeventfd in virtio-rng Sasha Levin
@ 2011-05-27 16:52 ` Ingo Molnar
  2011-05-29 11:55   ` Pekka Enberg
  3 siblings, 1 reply; 13+ messages in thread
From: Ingo Molnar @ 2011-05-27 16:52 UTC (permalink / raw)
  To: Sasha Levin; +Cc: penberg, john, kvm, asias.hejun, gorcunov, prasadjoshi124


* Sasha Levin <levinsasha928@gmail.com> wrote:

> Benchmarks ran on a seperate (non boot) 1GB virtio-blk device, 
> formatted as ext4, using bonnie++.
> 
> cmd line:
> # bonnie++ -d temp/ -c 2 -s 768 -u 0
> 
> Before:
> Version  1.96       ------Sequential Output------ --Sequential Input- --Random-
> Concurrency   2     -Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--
> Machine        Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP  /sec %CP
> tux            768M   498  99 381127  74 269712  48  2871  99 717109  50 +++++ +++
> Latency             18368us   31063us   21281us    3017us    6875us     251ms
> Version  1.96       ------Sequential Create------ --------Random Create--------
> tux                 -Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--
>               files  /sec %CP  /sec %CP  /sec %CP  /sec %CP  /sec %CP  /sec %CP
>                  16 +++++ +++ +++++ +++ +++++ +++ +++++ +++ +++++ +++ +++++ +++
> Latency               148us     588us    2792us    1547us    1543us     218us
> 
> After:
> Version  1.96       ------Sequential Output------ --Sequential Input- --Random-
> Concurrency   2     -Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--
> Machine        Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP  /sec %CP
> tux            768M   499  99 459779  73 350689  54  2997  99 860395  58 +++++ +++
> Latency             17194us   14619us   26358us    4055us    7890us   44122us
> Version  1.96       ------Sequential Create------ --------Random Create--------
> tux                 -Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--
>               files  /sec %CP  /sec %CP  /sec %CP  /sec %CP  /sec %CP  /sec %CP
>                  16 +++++ +++ +++++ +++ +++++ +++ +++++ +++ +++++ +++ +++++ +++
> Latency               135us     567us    2564us     134us    1500us     231us

In such cases it would *really* be useful, in addition of dumping 
80-100 raw numbers to summarize results and compare them for the 
reader, as i suspect you've done already? Please don't keep it a 
secret and don't force the reader to compare two tables with 80+ raw 
numbers! :-)

Something like:

" In short, block writes are 20% faster, block reads are 19.9% 
  faster, seeks got 16% faster. None of the operations got slower. "

Thanks,

	Ingo

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 1/4] kvm tools: Add ioeventfd support
  2011-05-27 16:52 ` [PATCH v2 1/4] kvm tools: Add ioeventfd support Ingo Molnar
@ 2011-05-29 11:55   ` Pekka Enberg
  0 siblings, 0 replies; 13+ messages in thread
From: Pekka Enberg @ 2011-05-29 11:55 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: Sasha Levin, john, kvm, asias.hejun, gorcunov, prasadjoshi124

On Fri, 2011-05-27 at 18:52 +0200, Ingo Molnar wrote:
> In such cases it would *really* be useful, in addition of dumping 
> 80-100 raw numbers to summarize results and compare them for the 
> reader, as i suspect you've done already? Please don't keep it a 
> secret and don't force the reader to compare two tables with 80+ raw 
> numbers! :-)
> 
> Something like:
> 
> " In short, block writes are 20% faster, block reads are 19.9% 
>   faster, seeks got 16% faster. None of the operations got slower. "

Definitely. I forgot to update the changelog but please remember to
provide *both* a summary and as much as the raw data as is reasonable in
the patch description.

			Pekka


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net
  2011-05-27 16:18 ` [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net Sasha Levin
@ 2011-05-29 12:51   ` Ingo Molnar
  2011-05-30 23:53   ` Asias He
  1 sibling, 0 replies; 13+ messages in thread
From: Ingo Molnar @ 2011-05-29 12:51 UTC (permalink / raw)
  To: Sasha Levin; +Cc: penberg, john, kvm, asias.hejun, gorcunov, prasadjoshi124


* Sasha Levin <levinsasha928@gmail.com> wrote:

> Use ioeventfds to receive notifications of IO events in virtio-net.
> Doing so prevents an exit every time we receive/send a packet.
> 
> Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
> ---
>  tools/kvm/virtio/net.c |   22 ++++++++++++++++++++++
>  1 files changed, 22 insertions(+), 0 deletions(-)

This needs the fix below to build on 32-bit.

Thanks,

	Ingo

Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/tools/kvm/virtio/net.c b/tools/kvm/virtio/net.c
index 5c39c43..6916af6 100644
--- a/tools/kvm/virtio/net.c
+++ b/tools/kvm/virtio/net.c
@@ -283,7 +283,7 @@ static bool virtio_net_pci_io_out(struct ioport *ioport, struct kvm *kvm, u16 po
 
 static void ioevent_callback(struct kvm *kvm, void *param)
 {
-	virtio_net_handle_callback(kvm, (u64)param);
+	virtio_net_handle_callback(kvm, (u64)(long)param);
 }
 
 static struct ioport_operations virtio_net_io_ops = {
@@ -416,7 +416,7 @@ void virtio_net__init(const struct virtio_net_parameters *params)
 				.io_len			= sizeof(u16),
 				.fn			= ioevent_callback,
 				.datamatch		= i,
-				.fn_ptr			= (void *)i,
+				.fn_ptr			= (void *)(long)i,
 				.fn_kvm			= params->kvm,
 				.fd			= eventfd(0, 0),
 			};


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net
  2011-05-27 16:18 ` [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net Sasha Levin
  2011-05-29 12:51   ` Ingo Molnar
@ 2011-05-30 23:53   ` Asias He
  2011-05-31  7:18     ` Pekka Enberg
  1 sibling, 1 reply; 13+ messages in thread
From: Asias He @ 2011-05-30 23:53 UTC (permalink / raw)
  To: Sasha Levin; +Cc: penberg, john, kvm, mingo, gorcunov, prasadjoshi124

On 05/28/2011 12:18 AM, Sasha Levin wrote:
> Use ioeventfds to receive notifications of IO events in virtio-net.
> Doing so prevents an exit every time we receive/send a packet.
> 
> Signed-off-by: Sasha Levin <levinsasha928@gmail.com>

Hi, Sasha

Here below are the test results you asked yesterday with your ioeventfd
changes.

----------------------------------
TAP based network performance with ioeventfd

(host -> guest)
hj:~# netperf -c -t TCP_STREAM -H guest
TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to guest
(192.168.33.15) port 0 AF_INET : demo
Recv   Send    Send                          Utilization       Service
Demand
Socket Socket  Message  Elapsed              Send     Recv     Send    Recv
Size   Size    Size     Time     Throughput  local    remote   local
remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % U      us/KB   us/KB

 87380  16384  16384    10.00      5477.07   60.01    -1.00    3.590
-1.000

hj:~# netperf -c -t UDP_STREAM -H guest
UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to
guest (192.168.33.15) port 0 AF_INET : demo
Socket  Message  Elapsed      Messages                   CPU      Service
Size    Size     Time         Okay Errors   Throughput   Util     Demand
bytes   bytes    secs            #      #   10^6bits/sec % SU     us/KB

114688   65507   10.00      221207      0    11592.2     73.33    8.368
110592           10.00       54793            2871.4     -1.00    -1.000

(guest-> host)
root@sid2:~# netperf -c -t TCP_STREAM -H host
TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to host
(192.168.33.2) port 0 AF_INET : demo
Recv   Send    Send                          Utilization       Service
Demand
Socket Socket  Message  Elapsed              Send     Recv     Send    Recv
Size   Size    Size     Time     Throughput  local    remote   local
remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % U      us/KB   us/KB

 87380  16384  16384    10.00      1724.30   98.70    -1.00    4.689
-1.000

root@sid2:~# netperf -c -t UDP_STREAM -H host
UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to
host (192.168.33.2) port 0 AF_INET : demo
Socket  Message  Elapsed      Messages                   CPU      Service
Size    Size     Time         Okay Errors   Throughput   Util     Demand
bytes   bytes    secs            #      #   10^6bits/sec % SU     us/KB

110592   65507   10.00      117986      0     6182.8     99.90    1.364
114688           10.00      114480            5999.1     -1.00    -1.000


-- 
Best Regards,
Asias He

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net
  2011-05-30 23:53   ` Asias He
@ 2011-05-31  7:18     ` Pekka Enberg
  2011-05-31 16:32       ` Sasha Levin
  0 siblings, 1 reply; 13+ messages in thread
From: Pekka Enberg @ 2011-05-31  7:18 UTC (permalink / raw)
  To: Asias He; +Cc: Sasha Levin, john, kvm, mingo, gorcunov, prasadjoshi124

On Tue, May 31, 2011 at 2:53 AM, Asias He <asias.hejun@gmail.com> wrote:
> TAP based network performance with ioeventfd

Heh, so how did it look _before_ ioeventfd? Did performance improve
and how much?

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net
  2011-05-31  7:18     ` Pekka Enberg
@ 2011-05-31 16:32       ` Sasha Levin
  2011-06-01  1:41         ` Asias He
  0 siblings, 1 reply; 13+ messages in thread
From: Sasha Levin @ 2011-05-31 16:32 UTC (permalink / raw)
  To: Pekka Enberg; +Cc: Asias He, john, kvm, mingo, gorcunov, prasadjoshi124

On Tue, 2011-05-31 at 10:18 +0300, Pekka Enberg wrote:
> On Tue, May 31, 2011 at 2:53 AM, Asias He <asias.hejun@gmail.com> wrote:
> > TAP based network performance with ioeventfd
> 
> Heh, so how did it look _before_ ioeventfd? Did performance improve
> and how much?

Asias, did you use TCP or UDP values as bandwidth in your previous test?

-- 

Sasha.


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net
  2011-05-31 16:32       ` Sasha Levin
@ 2011-06-01  1:41         ` Asias He
  2011-06-01  6:35           ` Sasha Levin
  0 siblings, 1 reply; 13+ messages in thread
From: Asias He @ 2011-06-01  1:41 UTC (permalink / raw)
  To: Sasha Levin; +Cc: Pekka Enberg, john, kvm, mingo, gorcunov, prasadjoshi124

On 06/01/2011 12:32 AM, Sasha Levin wrote:
> On Tue, 2011-05-31 at 10:18 +0300, Pekka Enberg wrote:
>> On Tue, May 31, 2011 at 2:53 AM, Asias He <asias.hejun@gmail.com> wrote:
>>> TAP based network performance with ioeventfd
>>
>> Heh, so how did it look _before_ ioeventfd? Did performance improve
>> and how much?
> 
> Asias, did you use TCP or UDP values as bandwidth in your previous test?
> 

These commit log have the performance test result before ioeventfd. But
the UDP one is missing.

commit 739ddbb3b0fe52aa90a84727a6e90da37ce7661b
commit 4ed38b41fc034cfb51fec2004f523fe98faa27f6


    Netpef test shows this patch changes:

    the host to guest bandwidth
    from 2866.27 Mbps (cpu 33.96%) to 5548.87 Mbps (cpu 53.87%),

    the guest to host bandwidth
    from 1408.86 Mbps (cpu 99.9%) to 1301.29 Mbps (cpu 99.9%).


Anyway, I did another test and post the result here:

Test shows host -> guest TCP performance drops from 6736.04 to 5562.25.
guest -> host TCP performance jumps from 1572.51 to 1731.55.

---------------------------------------------------------------
TAP based network performance *with* ioeventfd with
commit 6dfc8581f14d355a89e7edaf4394ee8c69177f1f
---------------------------------------------------------------
(host -> guest)
hj:~# netperf -c -t TCP_STREAM -H guest
TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to guest
(192.168.33.15) port 0 AF_INET : demo
Recv   Send    Send                          Utilization       Service
Demand
Socket Socket  Message  Elapsed              Send     Recv     Send    Recv
Size   Size    Size     Time     Throughput  local    remote   local
remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % U      us/KB   us/KB

 87380  16384  16384    10.00      5562.25   45.74    -1.00    2.694
-1.000
hj:~# netperf -c -t UDP_STREAM -H guest
UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to
guest (192.168.33.15) port 0 AF_INET : demo
Socket  Message  Elapsed      Messages                   CPU      Service
Size    Size     Time         Okay Errors   Throughput   Util     Demand
bytes   bytes    secs            #      #   10^6bits/sec % SU     us/KB

114688   65507   10.00      275048      0    14413.5     71.65    11.516
110592           10.00       38904            2038.7     -1.00    -1.000


(guest-> host)
root@sid2:~# netperf -c -t TCP_STREAM -H host
TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to host
(192.168.33.2) port 0 AF_INET : demo
Recv   Send    Send                          Utilization       Service
Demand
Socket Socket  Message  Elapsed              Send     Recv     Send    Recv
Size   Size    Size     Time     Throughput  local    remote   local
remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % U      us/KB   us/KB

 87380  16384  16384    10.00      1731.55   99.30    -1.00    4.698
-1.000
root@sid2:~# netperf -c -t UDP_STREAM -H host
UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to
host (192.168.33.2) port 0 AF_INET : demo
Socket  Message  Elapsed      Messages                   CPU      Service
Size    Size     Time         Okay Errors   Throughput   Util     Demand
bytes   bytes    secs            #      #   10^6bits/sec % SU     us/KB

110592   65507   10.00      134084      0     7026.4     99.80    1.169
114688           10.00      133473            6994.4     -1.00    -1.000



---------------------------------------------------------------
TAP based network performance *without* ioeventfd with
commit dd2e19aee64ad93d170fc7f285c7072aa4e360a0
---------------------------------------------------------------
(host -> guest)
hj:~# netperf -c -t TCP_STREAM -H guest
TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to guest
(192.168.33.15) port 0 AF_INET : demo
Recv   Send    Send                          Utilization       Service
Demand
Socket Socket  Message  Elapsed              Send     Recv     Send    Recv
Size   Size    Size     Time     Throughput  local    remote   local
remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % U      us/KB   us/KB

 87380  16384  16384    10.00      6736.04   39.58    -1.00    1.925
-1.000
hj:~# netperf -c -t UDP_STREAM -H guest
UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to
guest (192.168.33.15) port 0 AF_INET : demo
Socket  Message  Elapsed      Messages                   CPU      Service
Size    Size     Time         Okay Errors   Throughput   Util     Demand
bytes   bytes    secs            #      #   10^6bits/sec % SU     us/KB

112640   65507   10.00      289263      0    15158.4     63.30    4.832
110592           10.00       81910            4292.4     -1.00    -1.000

(guest-> host)
root@sid2:~#  netperf -c -t TCP_STREAM -H host
TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to host
(192.168.33.2) port 0 AF_INET : demo
Recv   Send    Send                          Utilization       Service
Demand
Socket Socket  Message  Elapsed              Send     Recv     Send    Recv
Size   Size    Size     Time     Throughput  local    remote   local
remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % U      us/KB   us/KB

 87380  16384  16384    10.00      1572.51   99.80    -1.00    5.199
-1.000
root@sid2:~#  netperf -c -t UDP_STREAM -H host
UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to
host (192.168.33.2) port 0 AF_INET : demo
Socket  Message  Elapsed      Messages                   CPU      Service
Size    Size     Time         Okay Errors   Throughput   Util     Demand
bytes   bytes    secs            #      #   10^6bits/sec % SU     us/KB

110592   65507   10.00      117100      0     6136.4     99.90    1.335
112640           10.00      117023            6132.4     -1.00    -1.000




-- 
Best Regards,
Asias He

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net
  2011-06-01  1:41         ` Asias He
@ 2011-06-01  6:35           ` Sasha Levin
  2011-06-01 11:11             ` Asias He
  0 siblings, 1 reply; 13+ messages in thread
From: Sasha Levin @ 2011-06-01  6:35 UTC (permalink / raw)
  To: Asias He; +Cc: Pekka Enberg, john, kvm, mingo, gorcunov, prasadjoshi124

On Wed, 2011-06-01 at 09:41 +0800, Asias He wrote:
> On 06/01/2011 12:32 AM, Sasha Levin wrote:
> > On Tue, 2011-05-31 at 10:18 +0300, Pekka Enberg wrote:
> >> On Tue, May 31, 2011 at 2:53 AM, Asias He <asias.hejun@gmail.com> wrote:
> >>> TAP based network performance with ioeventfd
> >>
> >> Heh, so how did it look _before_ ioeventfd? Did performance improve
> >> and how much?
> > 
> > Asias, did you use TCP or UDP values as bandwidth in your previous test?
> > 
> 
> These commit log have the performance test result before ioeventfd. But
> the UDP one is missing.
> 
> commit 739ddbb3b0fe52aa90a84727a6e90da37ce7661b
> commit 4ed38b41fc034cfb51fec2004f523fe98faa27f6
> 
> 
>     Netperf test shows this patch changes:
> 
>     the host to guest bandwidth
>     from 2866.27 Mbps (cpu 33.96%) to 5548.87 Mbps (cpu 53.87%),
> 
>     the guest to host bandwidth
>     from 1408.86 Mbps (cpu 99.9%) to 1301.29 Mbps (cpu 99.9%).
> 
> 
> Anyway, I did another test and post the result here:
> 
> Test shows host -> guest TCP performance drops from 6736.04 to 5562.25.
> guest -> host TCP performance jumps from 1572.51 to 1731.55.

That's quite strange. I wasn't expecting any changes with our current
network code: Our RX thread is blocking on readv() most of the time, so
it doesn't get affected by IRQ/ioeventfd signals at all, and the TX
thread should get signaled to wake up just once or twice when the stream
starts - after which the virtio ring should be full with data.

I installed netperf and ran the tests (full results are below), guest ->
host TCP changed from 1862 to 2567 and host -> guest TCP changed from
7716 to 8065.

----------

Before ioeventfd:

(guest -> host)
tux ~ # netperf -c -t TCP_STREAM -H 192.168.33.2
TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 192.168.33.2
(192.168.33.2) port 0 AF_INET
Recv   Send    Send                          Utilization       Service
Demand
Socket Socket  Message  Elapsed              Send     Recv     Send
Recv
Size   Size    Size     Time     Throughput  local    remote   local
remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % U      us/KB
us/KB

 87380  16384  16384    10.00      1862.98   49.75    -1.00    4.375
-1.000 

tux ~ # netperf -c -t UDP_STREAM -H 192.168.33.2
UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to
192.168.33.2 (192.168.33.2) port 0 AF_INET
Socket  Message  Elapsed      Messages                   CPU
Service
Size    Size     Time         Okay Errors   Throughput   Util     Demand
bytes   bytes    secs            #      #   10^6bits/sec % SU     us/KB

124928   65507   10.00      301117      0    15779.9     43.13    0.449 
124928           10.00      300118           15727.6     -1.00    -1.000

(host -> guest)
lappy ~ # netperf -c -t TCP_STREAM -H 192.168.33.4
TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 192.168.33.4
(192.168.33.4) port 0 AF_INET
Recv   Send    Send                          Utilization       Service
Demand
Socket Socket  Message  Elapsed              Send     Recv     Send
Recv
Size   Size    Size     Time     Throughput  local    remote   local
remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % U      us/KB
us/KB

 87380  16384  16384    10.00      7716.23   87.49    -1.00    1.858
-1.000 

lappy ~ # netperf -c -t UDP_STREAM -H 192.168.33.4
UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to
192.168.33.4 (192.168.33.4) port 0 AF_INET
Socket  Message  Elapsed      Messages                   CPU
Service
Size    Size     Time         Okay Errors   Throughput   Util     Demand
bytes   bytes    secs            #      #   10^6bits/sec % SU     us/KB

124928   65507   10.00      142037      0     7443.4     84.34    6.398 
124928           10.00       41211            2159.6     -1.00    -1.000

After ioeventfd:

(guest -> host)
tux ~ # netperf -c -t TCP_STREAM -H 192.168.33.2
TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 192.168.33.2
(192.168.33.2) port 0 AF_INET
Recv   Send    Send                          Utilization       Service
Demand
Socket Socket  Message  Elapsed              Send     Recv     Send
Recv
Size   Size    Size     Time     Throughput  local    remote   local
remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % U      us/KB
us/KB

 87380  16384  16384    10.00      2567.83   47.29    -1.00    3.017
-1.000 
tux ~ # netperf -c -t UDP_STREAM -H 192.168.33.2
UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to
192.168.33.2 (192.168.33.2) port 0 AF_INET
Socket  Message  Elapsed      Messages                   CPU
Service
Size    Size     Time         Okay Errors   Throughput   Util     Demand
bytes   bytes    secs            #      #   10^6bits/sec % SU     us/KB

124928   65507   10.00      305265      0    15997.4     49.35    0.506 
124928           10.00      304818           15974.0     -1.00    -1.000

(host -> guest)
lappy ~ # netperf -c -t TCP_STREAM -H 192.168.33.4
TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 192.168.33.4
(192.168.33.4) port 0 AF_INET
Recv   Send    Send                          Utilization       Service
Demand
Socket Socket  Message  Elapsed              Send     Recv     Send
Recv
Size   Size    Size     Time     Throughput  local    remote   local
remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % U      us/KB
us/KB

 87380  16384  16384    10.00      8065.82   88.15    -1.00    1.791
-1.000 
lappy ~ # netperf -c -t UDP_STREAM -H 192.168.33.4
UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to
192.168.33.4 (192.168.33.4) port 0 AF_INET
Socket  Message  Elapsed      Messages                   CPU
Service
Size    Size     Time         Okay Errors   Throughput   Util     Demand
bytes   bytes    secs            #      #   10^6bits/sec % SU     us/KB

124928   65507   10.00      137846      0     7223.8     83.97    6.540 
124928           10.00       40140            2103.5     -1.00    -1.000





-- 

Sasha.


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net
  2011-06-01  6:35           ` Sasha Levin
@ 2011-06-01 11:11             ` Asias He
  0 siblings, 0 replies; 13+ messages in thread
From: Asias He @ 2011-06-01 11:11 UTC (permalink / raw)
  To: Sasha Levin; +Cc: Pekka Enberg, john, kvm, mingo, gorcunov, prasadjoshi124

On 06/01/2011 02:35 PM, Sasha Levin wrote:
> On Wed, 2011-06-01 at 09:41 +0800, Asias He wrote:
>> On 06/01/2011 12:32 AM, Sasha Levin wrote:
>>> On Tue, 2011-05-31 at 10:18 +0300, Pekka Enberg wrote:
>>>> On Tue, May 31, 2011 at 2:53 AM, Asias He <asias.hejun@gmail.com> wrote:
>>>>> TAP based network performance with ioeventfd
>>>>
>>>> Heh, so how did it look _before_ ioeventfd? Did performance improve
>>>> and how much?
>>>
>>> Asias, did you use TCP or UDP values as bandwidth in your previous test?
>>>
>>
>> These commit log have the performance test result before ioeventfd. But
>> the UDP one is missing.
>>
>> commit 739ddbb3b0fe52aa90a84727a6e90da37ce7661b
>> commit 4ed38b41fc034cfb51fec2004f523fe98faa27f6
>>
>>
>>     Netperf test shows this patch changes:
>>
>>     the host to guest bandwidth
>>     from 2866.27 Mbps (cpu 33.96%) to 5548.87 Mbps (cpu 53.87%),
>>
>>     the guest to host bandwidth
>>     from 1408.86 Mbps (cpu 99.9%) to 1301.29 Mbps (cpu 99.9%).
>>
>>
>> Anyway, I did another test and post the result here:
>>
>> Test shows host -> guest TCP performance drops from 6736.04 to 5562.25.
>> guest -> host TCP performance jumps from 1572.51 to 1731.55.
> 
> That's quite strange. I wasn't expecting any changes with our current
> network code: Our RX thread is blocking on readv() most of the time, so
> it doesn't get affected by IRQ/ioeventfd signals at all, and the TX
> thread should get signaled to wake up just once or twice when the stream
> starts - after which the virtio ring should be full with data.
> 
> I installed netperf and ran the tests (full results are below), guest ->
> host TCP changed from 1862 to 2567 and host -> guest TCP changed from
> 7716 to 8065.

I have no idea why I am seeing this regression in my box. However, your
change should not introduce this regression.


-- 
Best Regards,
Asias He

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2011-06-01 11:13 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2011-05-27 16:18 [PATCH v2 1/4] kvm tools: Add ioeventfd support Sasha Levin
2011-05-27 16:18 ` [PATCH v2 2/4] kvm tools: Use ioeventfd in virtio-blk Sasha Levin
2011-05-27 16:18 ` [PATCH v2 3/4] kvm tools: Use ioeventfd in virtio-net Sasha Levin
2011-05-29 12:51   ` Ingo Molnar
2011-05-30 23:53   ` Asias He
2011-05-31  7:18     ` Pekka Enberg
2011-05-31 16:32       ` Sasha Levin
2011-06-01  1:41         ` Asias He
2011-06-01  6:35           ` Sasha Levin
2011-06-01 11:11             ` Asias He
2011-05-27 16:18 ` [PATCH v2 4/4] kvm tools: Use ioeventfd in virtio-rng Sasha Levin
2011-05-27 16:52 ` [PATCH v2 1/4] kvm tools: Add ioeventfd support Ingo Molnar
2011-05-29 11:55   ` Pekka Enberg

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox