From: Sasha Levin <levinsasha928@gmail.com>
To: penberg@kernel.org
Cc: mingo@elte.hu, asias.hejun@gmail.com, gorcunov@gmail.com,
	prasadjoshi124@gmail.com, kvm@vger.kernel.org,
	Sasha Levin <levinsasha928@gmail.com>
Subject: [PATCH 6/6] kvm tools: Use threadpool for virtio-net
Date: Thu, 28 Apr 2011 16:40:45 +0300	[thread overview]
Message-ID: <1303998045-22932-6-git-send-email-levinsasha928@gmail.com> (raw)
In-Reply-To: <1303998045-22932-1-git-send-email-levinsasha928@gmail.com>

virtio-net has been converted to use the threadpool.
This is very similar to the change done in virtio-blk, except that here we have two queues (RX and TX) to handle.
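
In summary, the new flow looks roughly like this (a sketch condensed from the diff below; rx_vq and tx_vq stand for the per-queue struct virt_queue pointers set up at queue configuration time, they are not identifiers from the code):

	/* Queue setup (VIRTIO_PCI_QUEUE_PFN): register one threadpool job per queue. */
	net_device.jobs[VIRTIO_NET_RX_QUEUE] =
		thread_pool__add_jobtype(self, virtio_net_rx_callback, rx_vq);
	net_device.jobs[VIRTIO_NET_TX_QUEUE] =
		thread_pool__add_jobtype(self, virtio_net_tx_callback, tx_vq);

	/* Guest kick (VIRTIO_PCI_QUEUE_NOTIFY): schedule the job for that queue. */
	thread_pool__signal_work(net_device.jobs[queue_index]);

The callbacks then run on a pool thread, drain the vring with virt_queue__get_iov() plus readv()/writev() on the tap fd, and raise VIRTIO_NET_IRQ when done, replacing the dedicated RX/TX threads and their mutex/condvar signalling.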

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 tools/kvm/virtio-net.c |  101 ++++++++++++------------------------------------
 1 files changed, 25 insertions(+), 76 deletions(-)

diff --git a/tools/kvm/virtio-net.c b/tools/kvm/virtio-net.c
index 3e13429..58b3de4 100644
--- a/tools/kvm/virtio-net.c
+++ b/tools/kvm/virtio-net.c
@@ -7,6 +7,7 @@
 #include "kvm/util.h"
 #include "kvm/kvm.h"
 #include "kvm/pci.h"
+#include "kvm/threadpool.h"
 
 #include <linux/virtio_net.h>
 #include <linux/if_tun.h>
@@ -40,16 +41,9 @@ struct net_device {
 	uint8_t				status;
 	uint16_t			queue_selector;
 
-	pthread_t			io_rx_thread;
-	pthread_mutex_t			io_rx_mutex;
-	pthread_cond_t			io_rx_cond;
-
-	pthread_t			io_tx_thread;
-	pthread_mutex_t			io_tx_mutex;
-	pthread_cond_t			io_tx_cond;
-
 	int				tap_fd;
 	char				tap_name[IFNAMSIZ];
+	void				*jobs[VIRTIO_NET_NUM_QUEUES];
 };
 
 static struct net_device net_device = {
@@ -69,70 +63,44 @@ static struct net_device net_device = {
 				  1UL << VIRTIO_NET_F_GUEST_TSO6,
 };
 
-static void *virtio_net_rx_thread(void *p)
+static void virtio_net_rx_callback(struct kvm *self, void *param)
 {
 	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
 	struct virt_queue *vq;
-	struct kvm *self;
 	uint16_t out, in;
 	uint16_t head;
 	int len;
 
-	self = p;
-	vq = &net_device.vqs[VIRTIO_NET_RX_QUEUE];
-
-	while (1) {
-		mutex_lock(&net_device.io_rx_mutex);
-		if (!virt_queue__available(vq))
-			pthread_cond_wait(&net_device.io_rx_cond, &net_device.io_rx_mutex);
-		mutex_unlock(&net_device.io_rx_mutex);
-
-		while (virt_queue__available(vq)) {
-			head = virt_queue__get_iov(vq, iov, &out, &in, self);
-			len = readv(net_device.tap_fd, iov, in);
-			virt_queue__set_used_elem(vq, head, len);
-			/* We should interrupt guest right now, otherwise latency is huge. */
-			kvm__irq_line(self, VIRTIO_NET_IRQ, 1);
-		}
+	vq = param;
 
+	while (virt_queue__available(vq)) {
+		head = virt_queue__get_iov(vq, iov, &out, &in, self);
+		len = readv(net_device.tap_fd, iov, in);
+		virt_queue__set_used_elem(vq, head, len);
 	}
 
-	pthread_exit(NULL);
-	return NULL;
-
+	kvm__irq_line(self, VIRTIO_NET_IRQ, 1);
 }
 
-static void *virtio_net_tx_thread(void *p)
+static void virtio_net_tx_callback(struct kvm *self, void *param)
 {
 	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
 	struct virt_queue *vq;
-	struct kvm *self;
 	uint16_t out, in;
 	uint16_t head;
 	int len;
 
-	self = p;
-	vq = &net_device.vqs[VIRTIO_NET_TX_QUEUE];
-
-	while (1) {
-		mutex_lock(&net_device.io_tx_mutex);
-		if (!virt_queue__available(vq))
-			pthread_cond_wait(&net_device.io_tx_cond, &net_device.io_tx_mutex);
-		mutex_unlock(&net_device.io_tx_mutex);
+	vq = param;
 
-		while (virt_queue__available(vq)) {
-			head = virt_queue__get_iov(vq, iov, &out, &in, self);
-			len = writev(net_device.tap_fd, iov, out);
-			virt_queue__set_used_elem(vq, head, len);
-		}
-
-		kvm__irq_line(self, VIRTIO_NET_IRQ, 1);
+	while (virt_queue__available(vq)) {
+		head = virt_queue__get_iov(vq, iov, &out, &in, self);
+		len = writev(net_device.tap_fd, iov, out);
+		virt_queue__set_used_elem(vq, head, len);
 	}
 
-	pthread_exit(NULL);
-	return NULL;
-
+	kvm__irq_line(self, VIRTIO_NET_IRQ, 1);
 }
+
 static bool virtio_net_pci_io_device_specific_in(void *data, unsigned long offset, int size, uint32_t count)
 {
 	uint8_t *config_space = (uint8_t *) &net_device.net_config;
@@ -193,19 +161,7 @@ static bool virtio_net_pci_io_in(struct kvm *self, uint16_t port, void *data, in
 
 static void virtio_net_handle_callback(struct kvm *self, uint16_t queue_index)
 {
-	if (queue_index == VIRTIO_NET_TX_QUEUE) {
-
-		mutex_lock(&net_device.io_tx_mutex);
-		pthread_cond_signal(&net_device.io_tx_cond);
-		mutex_unlock(&net_device.io_tx_mutex);
-
-	} else if (queue_index == VIRTIO_NET_RX_QUEUE) {
-
-		mutex_lock(&net_device.io_rx_mutex);
-		pthread_cond_signal(&net_device.io_rx_cond);
-		mutex_unlock(&net_device.io_rx_mutex);
-
-	}
+	thread_pool__signal_work(net_device.jobs[queue_index]);
 }
 
 static bool virtio_net_pci_io_out(struct kvm *self, uint16_t port, void *data, int size, uint32_t count)
@@ -231,6 +187,13 @@ static bool virtio_net_pci_io_out(struct kvm *self, uint16_t port, void *data, i
 
 		vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, 4096);
 
+		if (net_device.queue_selector == VIRTIO_NET_TX_QUEUE)
+			net_device.jobs[net_device.queue_selector] =
+				thread_pool__add_jobtype(self, virtio_net_tx_callback, queue);
+		else if (net_device.queue_selector == VIRTIO_NET_RX_QUEUE)
+			net_device.jobs[net_device.queue_selector] =
+				thread_pool__add_jobtype(self, virtio_net_rx_callback, queue);
+
 		break;
 	}
 	case VIRTIO_PCI_QUEUE_SEL:
@@ -367,24 +330,10 @@ fail:
 	return 0;
 }
 
-static void virtio_net__io_thread_init(struct kvm *self)
-{
-	pthread_mutex_init(&net_device.io_rx_mutex, NULL);
-	pthread_cond_init(&net_device.io_tx_cond, NULL);
-
-	pthread_mutex_init(&net_device.io_rx_mutex, NULL);
-	pthread_cond_init(&net_device.io_tx_cond, NULL);
-
-	pthread_create(&net_device.io_rx_thread, NULL, virtio_net_rx_thread, (void *)self);
-	pthread_create(&net_device.io_tx_thread, NULL, virtio_net_tx_thread, (void *)self);
-}
-
 void virtio_net__init(const struct virtio_net_parameters *params)
 {
 	if (virtio_net__tap_init(params)) {
 		pci__register(&virtio_net_pci_device, PCI_VIRTIO_NET_DEVNUM);
 		ioport__register(IOPORT_VIRTIO_NET, &virtio_net_io_ops, IOPORT_VIRTIO_NET_SIZE);
-
-		virtio_net__io_thread_init(params->self);
 	}
 }
-- 
1.7.5.rc3


