From: Cam Macdonell <cam@cs.ualberta.ca>
To: kvm@vger.kernel.org
Cc: Cam Macdonell <cam@cs.ualberta.ca>, qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH v4] Shared memory uio_pci driver
Date: Wed, 7 Apr 2010 17:00:58 -0600
Message-ID: <1270681258-9042-1-git-send-email-cam@cs.ualberta.ca>
In-Reply-To: <1270680720-8457-2-git-send-email-cam@cs.ualberta.ca>

This patch adds a driver, built on the UIO framework, for my shared memory
PCI device. The device has three BARs: the first holds the device registers
used for sending interrupts, the second is used for receiving MSI-X
interrupts, and the third maps the shared memory itself. The driver exports
only the first and third regions (registers and shared memory) to userspace.
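For illustration, here is a minimal sketch of how a guest application could
map the two exported regions through UIO. The device node /dev/uio0 and the
1 MB shared-memory size are assumptions for the example; UIO exposes map N
of a device at mmap offset N * page size.

    /* sketch: map the register BAR (UIO map 0) and the shared memory
     * region (UIO map 1) of the ivshmem device from userspace */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long pg = sysconf(_SC_PAGESIZE);
            int fd = open("/dev/uio0", O_RDWR);

            if (fd < 0) {
                    perror("open /dev/uio0");
                    return 1;
            }

            /* UIO convention: mmap offset N * page size selects map N */
            void *regs_map = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                                  MAP_SHARED, fd, 0 * pg);
            void *shm_map = mmap(NULL, 1024 * 1024, PROT_READ | PROT_WRITE,
                                 MAP_SHARED, fd, 1 * pg);
            if (regs_map == MAP_FAILED || shm_map == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            volatile unsigned int *regs = regs_map;
            unsigned char *shmem = shm_map;

            shmem[0] = 0x42;        /* write into the shared region */
            printf("IntrMask register: 0x%x\n", regs[0]);
            return 0;
    }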
This driver supports both MSI-X and regular pin interrupts. The number of
MSI-X vectors requested is currently hard-coded in the probe function (4 in
this patch) but could easily be changed. If MSI-X is not available, the
driver falls back to regular pin interrupts.
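On the userspace side, waiting for either kind of interrupt looks the same:
a blocking read() of four bytes on the UIO file descriptor returns the
cumulative interrupt count. A minimal sketch, again assuming the device is
bound as /dev/uio0:

    /* sketch: block until the ivshmem device raises an interrupt */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/uio0", O_RDONLY);
            uint32_t count;

            if (fd < 0) {
                    perror("open /dev/uio0");
                    return 1;
            }

            /* read() on a UIO device blocks until the next interrupt and
             * then returns the total number of interrupts seen so far */
            while (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("interrupt, total count %u\n", count);

            return 0;
    }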
This version adds formatting and style corrections as well as better error
checking and cleanup on error paths.
---
drivers/uio/Kconfig | 8 ++
drivers/uio/Makefile | 1 +
drivers/uio/uio_ivshmem.c | 252 +++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 261 insertions(+), 0 deletions(-)
create mode 100644 drivers/uio/uio_ivshmem.c
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 1da73ec..b92cded 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -74,6 +74,14 @@ config UIO_SERCOS3
If you compile this as a module, it will be called uio_sercos3.
+config UIO_IVSHMEM
+ tristate "KVM shared memory PCI driver"
+ default n
+ help
+	  Userspace I/O interface for the KVM inter-VM shared memory device.
+	  This driver exposes two memory regions: the first contains the
+	  device registers and the second is the memory shared between VMs.
+
config UIO_PCI_GENERIC
tristate "Generic driver for PCI 2.3 and PCI Express cards"
depends on PCI
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index 18fd818..25c1ca5 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_UIO_AEC) += uio_aec.o
obj-$(CONFIG_UIO_SERCOS3) += uio_sercos3.o
obj-$(CONFIG_UIO_PCI_GENERIC) += uio_pci_generic.o
obj-$(CONFIG_UIO_NETX) += uio_netx.o
+obj-$(CONFIG_UIO_IVSHMEM) += uio_ivshmem.o
diff --git a/drivers/uio/uio_ivshmem.c b/drivers/uio/uio_ivshmem.c
new file mode 100644
index 0000000..42ac9a7
--- /dev/null
+++ b/drivers/uio/uio_ivshmem.c
@@ -0,0 +1,252 @@
+/*
+ * UIO IVShmem Driver
+ *
+ * (C) 2009 Cam Macdonell
+ * based on Hilscher CIF card driver (C) 2007 Hans J. Koch <hjk@linutronix.de>
+ *
+ * Licensed under GPL version 2 only.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/uio_driver.h>
+
+#include <asm/io.h>
+
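+/* register offsets within BAR0, as defined by the ivshmem device */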
+#define IntrStatus 0x04
+#define IntrMask 0x00
+
+struct ivshmem_info {
+ struct uio_info *uio;
+ struct pci_dev *dev;
+ char (*msix_names)[256];
+ struct msix_entry *msix_entries;
+ int nvectors;
+};
+
+static irqreturn_t ivshmem_handler(int irq, struct uio_info *dev_info)
+{
+ void __iomem *plx_intscr = dev_info->mem[0].internal_addr
+ + IntrStatus;
+ u32 val;
+
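+	/* the interrupt line may be shared with other devices: a status of
+	 * zero means the interrupt was not raised by this device */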
+ val = readl(plx_intscr);
+ if (val == 0)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ivshmem_msix_handler(int irq, void *opaque)
+{
+	struct uio_info *dev_info = opaque;
+
+ /* we have to do this explicitly when using MSI-X */
+ uio_event_notify(dev_info);
+ return IRQ_HANDLED;
+}
+
+static void free_msix_vectors(struct ivshmem_info *ivs_info,
+ const int max_vector)
+{
+ int i;
+
+ for (i = 0; i < max_vector; i++)
+ free_irq(ivs_info->msix_entries[i].vector, ivs_info->uio);
+}
+
+static int request_msix_vectors(struct ivshmem_info *ivs_info, int nvectors)
+{
+ int i, err;
+ const char *name = "ivshmem";
+
+ ivs_info->nvectors = nvectors;
+
+	ivs_info->msix_entries = kmalloc(nvectors * sizeof(*ivs_info->msix_entries),
+					 GFP_KERNEL);
+	if (ivs_info->msix_entries == NULL)
+		return -ENOMEM;
+
+	ivs_info->msix_names = kmalloc(nvectors * sizeof(*ivs_info->msix_names),
+				       GFP_KERNEL);
+	if (ivs_info->msix_names == NULL) {
+		kfree(ivs_info->msix_entries);
+		return -ENOMEM;
+	}
+
+ for (i = 0; i < nvectors; ++i)
+ ivs_info->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(ivs_info->dev, ivs_info->msix_entries,
+ ivs_info->nvectors);
+	if (err > 0) {
+		/* a positive return value is the number of vectors actually
+		 * available, so retry with that count */
+		ivs_info->nvectors = err;
+		err = pci_enable_msix(ivs_info->dev, ivs_info->msix_entries,
+				      ivs_info->nvectors);
+		if (err) {
+			printk(KERN_INFO "ivshmem: no MSI-X (%d), falling back to INTx\n",
+			       err);
+			goto error;
+		}
+	}
+
+ if (err)
+ goto error;
+
+ for (i = 0; i < ivs_info->nvectors; i++) {
+
+		snprintf(ivs_info->msix_names[i], sizeof(*ivs_info->msix_names),
+			 "%s[%d]", name, i);
+
+ err = request_irq(ivs_info->msix_entries[i].vector,
+ ivshmem_msix_handler, 0,
+ ivs_info->msix_names[i], ivs_info->uio);
+
+		if (err) {
+			/* vectors 0..i-1 were successfully requested */
+			free_msix_vectors(ivs_info, i);
+			pci_disable_msix(ivs_info->dev);
+			goto error;
+		}
+
+ }
+
+ return 0;
+error:
+ kfree(ivs_info->msix_entries);
+ kfree(ivs_info->msix_names);
+ return err;
+}
+
+static int __devinit ivshmem_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ struct uio_info *info;
+	struct ivshmem_info *ivshmem_info;
+ int nvectors = 4;
+
+ info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ivshmem_info = kzalloc(sizeof(struct ivshmem_info), GFP_KERNEL);
+ if (!ivshmem_info) {
+ kfree(info);
+ return -ENOMEM;
+ }
+
+ if (pci_enable_device(dev))
+ goto out_free;
+
+ if (pci_request_regions(dev, "ivshmem"))
+ goto out_disable;
+
+ info->mem[0].addr = pci_resource_start(dev, 0);
+ if (!info->mem[0].addr)
+ goto out_release;
+
+ info->mem[0].size = pci_resource_len(dev, 0);
+ info->mem[0].internal_addr = pci_ioremap_bar(dev, 0);
+	if (!info->mem[0].internal_addr)
+		goto out_release;
+
+ info->mem[0].memtype = UIO_MEM_PHYS;
+
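+	/* BAR2 holds the shared memory itself and becomes the second map
+	 * exported to userspace (BAR1, the MSI-X BAR, is not exported) */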
+ info->mem[1].addr = pci_resource_start(dev, 2);
+ if (!info->mem[1].addr)
+ goto out_unmap;
+ info->mem[1].internal_addr = pci_ioremap_bar(dev, 2);
+ if (!info->mem[1].internal_addr)
+ goto out_unmap;
+
+ info->mem[1].size = pci_resource_len(dev, 2);
+ info->mem[1].memtype = UIO_MEM_PHYS;
+
+ ivshmem_info->uio = info;
+ ivshmem_info->dev = dev;
+
+	if (request_msix_vectors(ivshmem_info, nvectors) != 0) {
+		printk(KERN_INFO "ivshmem: MSI-X unavailable, using regular IRQs\n");
+		info->irq = dev->irq;
+		info->irq_flags = IRQF_SHARED;
+		info->handler = ivshmem_handler;
+		writel(0xffffffff, info->mem[0].internal_addr + IntrMask);
+	} else {
+		printk(KERN_INFO "ivshmem: MSI-X enabled\n");
+		info->irq = -1;
+	}
+
+ info->name = "ivshmem";
+ info->version = "0.0.1";
+
+ if (uio_register_device(&dev->dev, info))
+ goto out_unmap2;
+
+ pci_set_drvdata(dev, info);
+
+ return 0;
+out_unmap2:
+	iounmap(info->mem[1].internal_addr);
+out_unmap:
+	iounmap(info->mem[0].internal_addr);
+out_release:
+	pci_release_regions(dev);
+out_disable:
+	pci_disable_device(dev);
+out_free:
+	kfree(ivshmem_info);
+	kfree(info);
+	return -ENODEV;
+}
+
+static void ivshmem_pci_remove(struct pci_dev *dev)
+{
+	struct uio_info *info = pci_get_drvdata(dev);
+
+	uio_unregister_device(info);
+	pci_set_drvdata(dev, NULL);
+	iounmap(info->mem[1].internal_addr);
+	iounmap(info->mem[0].internal_addr);
+	pci_release_regions(dev);
+	pci_disable_device(dev);
+	kfree(info);
+}
+
+static struct pci_device_id ivshmem_pci_ids[] __devinitdata = {
+ {
+ .vendor = 0x1af4,
+ .device = 0x1110,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { 0, }
+};
+
+static struct pci_driver ivshmem_pci_driver = {
+ .name = "uio_ivshmem",
+ .id_table = ivshmem_pci_ids,
+ .probe = ivshmem_pci_probe,
+ .remove = ivshmem_pci_remove,
+};
+
+static int __init ivshmem_init_module(void)
+{
+ return pci_register_driver(&ivshmem_pci_driver);
+}
+
+static void __exit ivshmem_exit_module(void)
+{
+ pci_unregister_driver(&ivshmem_pci_driver);
+}
+
+module_init(ivshmem_init_module);
+module_exit(ivshmem_exit_module);
+
+MODULE_DEVICE_TABLE(pci, ivshmem_pci_ids);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Cam Macdonell");
--
1.6.0.6