qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Gerd Hoffmann <kraxel@redhat.com>
To: qemu-devel@nongnu.org
Cc: Gerd Hoffmann <kraxel@redhat.com>, "Michael S. Tsirkin" <mst@redhat.com>
Subject: [Qemu-devel] [PATCH] virtio-pci: add a per-device-class queue limit
Date: Tue, 30 Jun 2015 10:35:01 +0200	[thread overview]
Message-ID: <1435653301-7507-1-git-send-email-kraxel@redhat.com> (raw)

The qemu virtio 1.0 implementation uses one page per virtqueue for
notification, which sums up to 4M address space for the 1024 possible
queues.

We have three devices which might use many queues: virtio-scsi and
virtio-net have multiqueue support, and virtio-serial uses one queue
pair per port.  All other devices have a small fixed number of queues
and acquiring resources for 1024 queues is pointless.

This patch adds a per-device-class queue limit to fix this.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 hw/virtio/virtio-pci.c | 32 +++++++++++++++++++++++---------
 hw/virtio/virtio-pci.h |  2 ++
 2 files changed, 25 insertions(+), 9 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 6a0174e..4470e27 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -195,7 +195,7 @@ static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
         return;
     }
 
-    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+    for (n = 0; n < proxy->queue_max; n++) {
         if (!virtio_queue_get_num(vdev, n)) {
             continue;
         }
@@ -231,7 +231,7 @@ static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
         return;
     }
 
-    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+    for (n = 0; n < proxy->queue_max; n++) {
         if (!virtio_queue_get_num(vdev, n)) {
             continue;
         }
@@ -267,11 +267,11 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
             virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
         break;
     case VIRTIO_PCI_QUEUE_SEL:
-        if (val < VIRTIO_QUEUE_MAX)
+        if (val < proxy->queue_max)
             vdev->queue_sel = val;
         break;
     case VIRTIO_PCI_QUEUE_NOTIFY:
-        if (val < VIRTIO_QUEUE_MAX) {
+        if (val < proxy->queue_max) {
             virtio_queue_notify(vdev, val);
         }
         break;
@@ -775,7 +775,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
     bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
         kvm_msi_via_irqfd_enabled();
 
-    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);
+    nvqs = MIN(nvqs, proxy->queue_max);
 
     /* When deassigning, pass a consistent nvqs value
      * to avoid leaking notifiers.
@@ -985,7 +985,7 @@ static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
         val = vdev->config_vector;
         break;
     case VIRTIO_PCI_COMMON_NUMQ:
-        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
+        for (i = 0; i < proxy->queue_max; ++i) {
             if (virtio_queue_get_num(vdev, i)) {
                 val = i + 1;
             }
@@ -1085,7 +1085,7 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
 
         break;
     case VIRTIO_PCI_COMMON_Q_SELECT:
-        if (val < VIRTIO_QUEUE_MAX) {
+        if (val < proxy->queue_max) {
             vdev->queue_sel = val;
         }
         break;
@@ -1389,6 +1389,16 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
     VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
     VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
 
+    if (k->queue_max) {
+        proxy->queue_max = k->queue_max;
+    }
+    if (proxy->queue_max < 16) {
+        proxy->queue_max = 16;
+    }
+    if (proxy->queue_max > VIRTIO_QUEUE_MAX) {
+        proxy->queue_max = VIRTIO_QUEUE_MAX;
+    }
+
     /*
      * virtio pci bar layout used by default.
      * subclasses can re-arrange things if needed.
@@ -1416,13 +1426,13 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
 
     proxy->notify.offset = 0x3000;
     proxy->notify.size =
-        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX;
+        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * proxy->queue_max;
     proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
 
     /* subclasses can enforce modern, so do this unconditionally */
     memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                        2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
-                       VIRTIO_QUEUE_MAX);
+                       proxy->queue_max);
 
     virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
     if (k->realize) {
@@ -1574,6 +1584,7 @@ static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
     PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
 
     k->realize = virtio_scsi_pci_realize;
+    k->queue_max = VIRTIO_QUEUE_MAX;
     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
     dc->props = virtio_scsi_pci_properties;
     pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
@@ -1629,6 +1640,7 @@ static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
     VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
     PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
     k->realize = vhost_scsi_pci_realize;
+    k->queue_max = VIRTIO_QUEUE_MAX;
     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
     dc->props = vhost_scsi_pci_properties;
     pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
@@ -1761,6 +1773,7 @@ static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
     VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
     PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
     k->realize = virtio_serial_pci_realize;
+    k->queue_max = VIRTIO_QUEUE_MAX;
     set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
     dc->props = virtio_serial_pci_properties;
     pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
@@ -1820,6 +1833,7 @@ static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
     set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
     dc->props = virtio_net_properties;
     vpciklass->realize = virtio_net_pci_realize;
+    vpciklass->queue_max = VIRTIO_QUEUE_MAX;
 }
 
 static void virtio_net_pci_instance_init(Object *obj)
diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index 05d9d24..1f66463 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -95,6 +95,7 @@ typedef struct {
 typedef struct VirtioPCIClass {
     PCIDeviceClass parent_class;
     void (*realize)(VirtIOPCIProxy *vpci_dev, Error **errp);
+    uint32_t queue_max;
 } VirtioPCIClass;
 
 typedef struct VirtIOPCIRegion {
@@ -128,6 +129,7 @@ struct VirtIOPCIProxy {
         uint32_t avail[2];
         uint32_t used[2];
     } vqs[VIRTIO_QUEUE_MAX];
+    uint32_t queue_max;
 
     bool ioeventfd_disabled;
     bool ioeventfd_started;
-- 
1.8.3.1

                 reply	other threads:[~2015-06-30  8:40 UTC|newest]

Thread overview: [no followups] expand[flat|nested]  mbox.gz  Atom feed

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1435653301-7507-1-git-send-email-kraxel@redhat.com \
    --to=kraxel@redhat.com \
    --cc=mst@redhat.com \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).