From: "Michael S. Tsirkin" <mst@redhat.com>
To: Amit Shah <amit.shah@redhat.com>
Cc: kvm@vger.kernel.org
Subject: Re: qemu-kvm missing some msix capability check
Date: Tue, 21 Jul 2009 19:54:00 +0300 [thread overview]
Message-ID: <20090721165400.GB4826@redhat.com> (raw)
In-Reply-To: <20090717130439.GA10081@amit-x200.redhat.com>
On Fri, Jul 17, 2009 at 06:34:40PM +0530, Amit Shah wrote:
> Hello,
>
> Using recent qemu-kvm userspace with a slightly older kernel module I
> get this when using the virtio-net device:
>
> kvm_msix_add: kvm_get_irq_route_gsi failed: No space left on device
>
> ... and the guest doesn't use the net device.
>
> This goes away when using a newer kvm module.
>
> Amit
Could you please verify whether the following patch helps?
---
virtio: retry on vector assignment failure
virtio currently fails to find any vqs if the device supports msi-x but
fails to assign it to a queue. Turns out, this actually happens for old
host kernels which can't allocate a gsi for the vector. As a result
guest does not use virtio net. Fix this by disabling MSI on
such failures and falling back to regular interrupts.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 9dcc368..567c972 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -273,26 +273,35 @@ static void vp_free_vectors(struct virtio_device *vdev)
}
static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
- int *options, int noptions)
+ int nvectors)
{
- int i;
- for (i = 0; i < noptions; ++i)
- if (!pci_enable_msix(dev, entries, options[i]))
- return options[i];
- return -EBUSY;
+ int err = pci_enable_msix(dev, entries, nvectors);
+ if (err > 0)
+ err = -ENOSPC;
+ return err;
}
-static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs)
+static int vp_request_irq(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ int err;
+ /* Can't allocate enough MSI-X vectors, use regular interrupt */
+ vp_dev->msix_vectors = 0;
+ err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
+ IRQF_SHARED, dev_name(&vp_dev->vdev.dev), vp_dev);
+ if (err)
+ return err;
+ vp_dev->intx_enabled = 1;
+ return 0;
+}
+
+static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs,
+ int nvectors)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
const char *name = dev_name(&vp_dev->vdev.dev);
unsigned i, v;
int err = -ENOMEM;
- /* We want at most one vector per queue and one for config changes.
- * Fallback to separate vectors for config and a shared for queues.
- * Finally fall back to regular interrupts. */
- int options[] = { max_vqs + 1, 2 };
- int nvectors = max(options[0], options[1]);
vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
GFP_KERNEL);
@@ -307,37 +316,29 @@ static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs)
vp_dev->msix_entries[i].entry = i;
err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries,
- options, ARRAY_SIZE(options));
- if (err < 0) {
- /* Can't allocate enough MSI-X vectors, use regular interrupt */
- vp_dev->msix_vectors = 0;
- err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
- IRQF_SHARED, name, vp_dev);
- if (err)
- goto error_irq;
- vp_dev->intx_enabled = 1;
- } else {
- vp_dev->msix_vectors = err;
- vp_dev->msix_enabled = 1;
-
- /* Set the vector used for configuration */
- v = vp_dev->msix_used_vectors;
- snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
- "%s-config", name);
- err = request_irq(vp_dev->msix_entries[v].vector,
- vp_config_changed, 0, vp_dev->msix_names[v],
- vp_dev);
- if (err)
- goto error_irq;
- ++vp_dev->msix_used_vectors;
-
- iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
- /* Verify we had enough resources to assign the vector */
- v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
- if (v == VIRTIO_MSI_NO_VECTOR) {
- err = -EBUSY;
- goto error_irq;
- }
+ nvectors);
+ if (err)
+ goto error_enable;
+ vp_dev->msix_vectors = nvectors;
+ vp_dev->msix_enabled = 1;
+
+ /* Set the vector used for configuration */
+ v = vp_dev->msix_used_vectors;
+ snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+ "%s-config", name);
+ err = request_irq(vp_dev->msix_entries[v].vector,
+ vp_config_changed, 0, vp_dev->msix_names[v],
+ vp_dev);
+ if (err)
+ goto error_irq;
+ ++vp_dev->msix_used_vectors;
+
+ iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+ /* Verify we had enough resources to assign the vector */
+ v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+ if (v == VIRTIO_MSI_NO_VECTOR) {
+ err = -EBUSY;
+ goto error_irq;
}
if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) {
@@ -355,9 +356,12 @@ static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs)
return 0;
error_irq:
vp_free_vectors(vdev);
+error_enable:
kfree(vp_dev->msix_names);
+ vp_dev->msix_names = NULL;
error_names:
kfree(vp_dev->msix_entries);
+ vp_dev->msix_entries = NULL;
error_entries:
return err;
}
@@ -499,24 +503,24 @@ static void vp_del_vqs(struct virtio_device *vdev)
vp_free_vectors(vdev);
kfree(vp_dev->msix_names);
+ vp_dev->msix_names = NULL;
kfree(vp_dev->msix_entries);
+ vp_dev->msix_entries = NULL;
}
-/* the config->find_vqs() implementation */
-static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char *names[])
+static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[],
+ int max_vectors,
+ int nvectors)
{
- int vectors = 0;
int i, err;
- /* How many vectors would we like? */
- for (i = 0; i < nvqs; ++i)
- if (callbacks[i])
- ++vectors;
-
- err = vp_request_vectors(vdev, vectors);
+ if (nvectors)
+ err = vp_request_vectors(vdev, max_vectors, nvectors);
+ else
+ err = vp_request_irq(vdev);
if (err)
goto error_request;
@@ -534,6 +538,34 @@ error_request:
return PTR_ERR(vqs[i]);
}
+/* the config->find_vqs() implementation */
+static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ /* We want at most one vector per queue and one for config changes.
+ * Fallback to separate vectors for config and a shared for queues.
+ * Finally fall back to regular interrupts. */
+ int options[] = { 1, 2, 0 };
+ int vectors = 0;
+ int i, uninitialized_var(err);
+
+ /* How many vectors would we like? */
+ for (i = 0; i < nvqs; ++i)
+ if (callbacks[i])
+ ++vectors;
+ options[0] = vectors + 1;
+
+ for (i = 0; i < ARRAY_SIZE(options); ++i) {
+ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
+ vectors, options[i]);
+ if (!err)
+ break;
+ }
+ return err;
+}
+
static struct virtio_config_ops virtio_pci_config_ops = {
.get = vp_get,
.set = vp_set,
next prev parent reply other threads:[~2009-07-21 16:54 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2009-07-17 13:04 qemu-kvm missing some msix capability check Amit Shah
2009-07-21 16:54 ` Michael S. Tsirkin [this message]
2009-07-21 17:12 ` Amit Shah
2009-07-21 17:34 ` Michael S. Tsirkin
2009-07-21 17:35 ` Michael S. Tsirkin
2009-07-23 14:25 ` Amit Shah
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20090721165400.GB4826@redhat.com \
--to=mst@redhat.com \
--cc=amit.shah@redhat.com \
--cc=kvm@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox