public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Sasha Levin <levinsasha928@gmail.com>
To: penberg@kernel.org
Cc: kvm@vger.kernel.org, mingo@elte.hu, asias.hejun@gmail.com,
	gorcunov@gmail.com, Sasha Levin <levinsasha928@gmail.com>
Subject: [PATCH 3/3] kvm tools: Add MSI-X support to virtio-net
Date: Thu, 11 Aug 2011 15:43:56 +0300	[thread overview]
Message-ID: <1313066636-28332-3-git-send-email-levinsasha928@gmail.com> (raw)
In-Reply-To: <1313066636-28332-1-git-send-email-levinsasha928@gmail.com>

The device uses virtio's preferred method of working with MSI-X:
one vector is created for configuration changes and one vector for
each vq in the device.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 tools/kvm/virtio/net.c |   54 +++++++++++++++++++++++++++++++++++++++++++----
 1 files changed, 49 insertions(+), 5 deletions(-)

diff --git a/tools/kvm/virtio/net.c b/tools/kvm/virtio/net.c
index e865b7f..35d4997 100644
--- a/tools/kvm/virtio/net.c
+++ b/tools/kvm/virtio/net.c
@@ -60,6 +60,9 @@ struct net_dev {
 	u8				isr;
 	u16				queue_selector;
 	u16				base_addr;
+	u32				vq_vector[VIRTIO_NET_NUM_QUEUES];
+	u32				gsis[VIRTIO_NET_NUM_QUEUES];
+	u32				msix_io_block;
 
 	pthread_t			io_rx_thread;
 	pthread_mutex_t			io_rx_lock;
@@ -125,7 +128,7 @@ static void *virtio_net_rx_thread(void *p)
 			virt_queue__set_used_elem(vq, head, len);
 
 			/* We should interrupt guest right now, otherwise latency is huge. */
-			virt_queue__trigger_irq(vq, pci_header.irq_line, &ndev.isr, kvm);
+			kvm__irq_trigger(kvm, ndev.gsis[VIRTIO_NET_RX_QUEUE]);
 		}
 
 	}
@@ -162,8 +165,7 @@ static void *virtio_net_tx_thread(void *p)
 			virt_queue__set_used_elem(vq, head, len);
 		}
 
-		virt_queue__trigger_irq(vq, pci_header.irq_line, &ndev.isr, kvm);
-
+		kvm__irq_trigger(kvm, ndev.gsis[VIRTIO_NET_TX_QUEUE]);
 	}
 
 	pthread_exit(NULL);
@@ -219,6 +221,12 @@ static bool virtio_net_pci_io_in(struct ioport *ioport, struct kvm *kvm, u16 por
 		kvm__irq_line(kvm, pci_header.irq_line, VIRTIO_IRQ_LOW);
 		ndev.isr = VIRTIO_IRQ_LOW;
 		break;
+	case VIRTIO_MSI_CONFIG_VECTOR:
+		ioport__write16(data, ndev.config_vector);
+		break;
+	case VIRTIO_MSI_QUEUE_VECTOR:
+		ioport__write16(data, ndev.vq_vector[ndev.queue_selector]);
+		break;
 	default:
 		ret = virtio_net_pci_io_device_specific_in(data, offset, size, count);
 	};
@@ -285,10 +293,22 @@ static bool virtio_net_pci_io_out(struct ioport *ioport, struct kvm *kvm, u16 po
 		ndev.status		= ioport__read8(data);
 		break;
 	case VIRTIO_MSI_CONFIG_VECTOR:
-		ndev.config_vector	= VIRTIO_MSI_NO_VECTOR;
+		ndev.config_vector	= ioport__read16(data);
 		break;
-	case VIRTIO_MSI_QUEUE_VECTOR:
+	case VIRTIO_MSI_QUEUE_VECTOR: {
+		u32 gsi;
+		u32 vec;
+
+		vec = ndev.vq_vector[ndev.queue_selector] = ioport__read16(data);
+
+		gsi = irq__add_msix_route(kvm,
+					  pci_header.msix.table[vec].low,
+					  pci_header.msix.table[vec].high,
+					  pci_header.msix.table[vec].data);
+
+		ndev.gsis[ndev.queue_selector] = gsi;
 		break;
+	}
 	default:
 		ret			= false;
 	};
@@ -308,6 +328,15 @@ static struct ioport_operations virtio_net_io_ops = {
 	.io_out	= virtio_net_pci_io_out,
 };
 
+static void callback_mmio(u64 addr, u8 *data, u32 len, u8 is_write, void *ptr)
+{
+	void *table = pci_header.msix.table;
+	if (is_write)
+		memcpy(table + addr - ndev.msix_io_block, data, len);
+	else
+		memcpy(data, table + addr - ndev.msix_io_block, len);
+}
+
 static bool virtio_net__tap_init(const struct virtio_net_parameters *params)
 {
 	int sock = socket(AF_INET, SOCK_STREAM, 0);
@@ -467,6 +496,21 @@ void virtio_net__init(const struct virtio_net_parameters *params)
 		ndev.ops = &uip_ops;
 	}
 
+	ndev.msix_io_block = pci_get_io_space_block();
+	kvm__register_mmio(params->kvm, ndev.msix_io_block, 0x100, callback_mmio, NULL);
+	pci_header.bar[1]	= ndev.msix_io_block |
+				PCI_BASE_ADDRESS_SPACE_MEMORY |
+				PCI_BASE_ADDRESS_MEM_TYPE_64;
+	/* bar[2] is the continuation of bar[1] for 64bit addressing */
+	pci_header.bar[2]	= 0;
+	pci_header.status	= PCI_STATUS_CAP_LIST;
+	pci_header.capabilities	= (void *)&pci_header.msix - (void *)&pci_header;
+
+	pci_header.msix.cap = PCI_CAP_ID_MSIX;
+	pci_header.msix.next = 0;
+	pci_header.msix.table_size = (VIRTIO_NET_NUM_QUEUES + 1) | PCI_MSIX_FLAGS_ENABLE;
+	pci_header.msix.table_offset = 1; /* Use BAR 1 */
+
 	virtio_net__io_thread_init(params->kvm);
 
 	for (i = 0; i < VIRTIO_NET_NUM_QUEUES; i++) {
-- 
1.7.6


  parent reply	other threads:[~2011-08-11 12:45 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2011-08-11 12:43 [PATCH 1/3] kvm tools: Make keyboard termination go through regular termination path Sasha Levin
2011-08-11 12:43 ` [PATCH 2/3] kvm tools: Add kvm__trigger_irq() Sasha Levin
2011-08-11 12:43 ` Sasha Levin [this message]
2011-08-11 13:33 ` [PATCH 1/3] kvm tools: Make keyboard termination go through regular termination path walimis
2011-08-11 13:39 ` Pekka Enberg
2011-08-11 13:41   ` Sasha Levin
2011-08-11 13:47     ` Pekka Enberg
2011-08-11 13:48       ` Sasha Levin
2011-08-11 14:04         ` Pekka Enberg

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1313066636-28332-3-git-send-email-levinsasha928@gmail.com \
    --to=levinsasha928@gmail.com \
    --cc=asias.hejun@gmail.com \
    --cc=gorcunov@gmail.com \
    --cc=kvm@vger.kernel.org \
    --cc=mingo@elte.hu \
    --cc=penberg@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox