From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org, asias@redhat.com, stefanha@redhat.com
Subject: [Qemu-devel] [PATCH 1/8] virtio: don't waste irqfds on control vqs
Date: Wed, 26 Dec 2012 12:52:19 +0200
Message-ID: <d3043e21ab7efa9bb8bd5b5f620c7e5766616250.1356519045.git.mst@redhat.com>
In-Reply-To: <cover.1356519045.git.mst@redhat.com>

Pass nvqs to set_guest_notifiers(). This makes it possible to
save irqfds by not allocating one for virtio-net's control vq,
which is handled in QEMU rather than by vhost.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/vhost.c      | 10 +++++++---
 hw/virtio-pci.c | 19 ++++++++++++++-----
 hw/virtio-pci.h |  1 +
 hw/virtio.h     |  2 +-
 4 files changed, 23 insertions(+), 9 deletions(-)
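
Note for reviewers: a minimal sketch (not part of this patch; the
function name and call site are illustrative) of how a vhost-based
backend such as vhost-net can use the new nvqs argument so that guest
notifiers, and therefore irqfds, are only set up for the data vqs:

/* Illustrative sketch only -- not part of this patch.
 * Include paths follow the tree layout used by this series.
 */
#include "hw/virtio.h"
#include "hw/vhost.h"

static int example_assign_guest_notifiers(struct vhost_dev *hdev,
                                          VirtIODevice *vdev)
{
    /* hdev->nvqs covers only the data vqs handled by vhost; the
     * virtio-net control vq is excluded, so no irqfd is allocated
     * for it when MSI-X and KVM irqfds are in use.
     */
    return vdev->binding->set_guest_notifiers(vdev->binding_opaque,
                                              hdev->nvqs, true);
}

On the stop/deassign path the caller must pass the same nvqs value
back, which is what the assert() added to
virtio_pci_set_guest_notifiers() below enforces.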

diff --git a/hw/vhost.c b/hw/vhost.c
index 4e1cb47..b6d73ca 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -879,7 +879,9 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         goto fail;
     }
 
-    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
+    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque,
+                                           hdev->nvqs,
+                                           true);
     if (r < 0) {
         fprintf(stderr, "Error binding guest notifier: %d\n", -r);
         goto fail_notifiers;
@@ -929,7 +931,7 @@ fail_vq:
     }
 fail_mem:
 fail_features:
-    vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
+    vdev->binding->set_guest_notifiers(vdev->binding_opaque, hdev->nvqs, false);
 fail_notifiers:
 fail:
     return r;
@@ -950,7 +952,9 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
         vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
                                 0, (hwaddr)~0x0ull);
     }
-    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
+    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque,
+                                           hdev->nvqs,
+                                           false);
     if (r < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
         fflush(stderr);
diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
index af9a56c..3c258a4 100644
--- a/hw/virtio-pci.c
+++ b/hw/virtio-pci.c
@@ -535,7 +535,7 @@ static int kvm_virtio_pci_vector_use(PCIDevice *dev, unsigned vector,
     VirtIODevice *vdev = proxy->vdev;
     int ret, queue_no;
 
-    for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
+    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
         }
@@ -565,7 +565,7 @@ static void kvm_virtio_pci_vector_release(PCIDevice *dev, unsigned vector)
     VirtIODevice *vdev = proxy->vdev;
     int queue_no;
 
-    for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
+    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
         }
@@ -587,7 +587,7 @@ static void kvm_virtio_pci_vector_poll(PCIDevice *dev,
     EventNotifier *notifier;
     VirtQueue *vq;
 
-    for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
+    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
         }
@@ -631,7 +631,7 @@ static bool virtio_pci_query_guest_notifiers(DeviceState *d)
     return msix_enabled(&proxy->pci_dev);
 }
 
-static int virtio_pci_set_guest_notifiers(DeviceState *d, bool assign)
+static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
 {
     VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
     VirtIODevice *vdev = proxy->vdev;
@@ -639,6 +639,15 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, bool assign)
     bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
         kvm_msi_via_irqfd_enabled();
 
+    nvqs = MIN(nvqs, VIRTIO_PCI_QUEUE_MAX);
+
+    /* When deassigning, pass a consistent nvqs value
+     * to avoid leaking notifiers.
+     */
+    assert(assign || nvqs == proxy->nvqs_with_notifiers);
+
+    proxy->nvqs_with_notifiers = nvqs;
+
     /* Must unset vector notifier while guest notifier is still assigned */
     if (proxy->vector_irqfd && !assign) {
         msix_unset_vector_notifiers(&proxy->pci_dev);
@@ -646,7 +655,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, bool assign)
         proxy->vector_irqfd = NULL;
     }
 
-    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
+    for (n = 0; n < nvqs; n++) {
         if (!virtio_queue_get_num(vdev, n)) {
             break;
         }
diff --git a/hw/virtio-pci.h b/hw/virtio-pci.h
index b58d9a2..b0f17e2 100644
--- a/hw/virtio-pci.h
+++ b/hw/virtio-pci.h
@@ -51,6 +51,7 @@ typedef struct {
     bool ioeventfd_disabled;
     bool ioeventfd_started;
     VirtIOIRQFD *vector_irqfd;
+    int nvqs_with_notifiers;
 } VirtIOPCIProxy;
 
 void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev);
diff --git a/hw/virtio.h b/hw/virtio.h
index 1dec9dc..329b426 100644
--- a/hw/virtio.h
+++ b/hw/virtio.h
@@ -99,7 +99,7 @@ typedef struct {
     int (*load_done)(DeviceState *d, QEMUFile *f);
     unsigned (*get_features)(DeviceState *d);
     bool (*query_guest_notifiers)(DeviceState *d);
-    int (*set_guest_notifiers)(DeviceState *d, bool assigned);
+    int (*set_guest_notifiers)(DeviceState *d, int nvqs, bool assigned);
     int (*set_host_notifier)(DeviceState *d, int n, bool assigned);
     void (*vmstate_change)(DeviceState *d, bool running);
 } VirtIOBindings;
-- 
MST

Thread overview: 10+ messages
2012-12-26 10:52 [Qemu-devel] [PATCH 0/8] virtio-pci: msix masking optimizations Michael S. Tsirkin
2012-12-26 10:52 ` Michael S. Tsirkin [this message]
2013-01-03 11:51   ` [Qemu-devel] [PATCH 1/8] virtio: don't waste irqfds on control vqs Stefan Hajnoczi
2012-12-26 10:52 ` [Qemu-devel] [PATCH 2/8] msix: add api to access msix message Michael S. Tsirkin
2012-12-26 10:52 ` [Qemu-devel] [PATCH 4/8] virtio-pci: cache msix messages Michael S. Tsirkin
2012-12-26 10:52 ` [Qemu-devel] [PATCH 3/8] kvm: add stub for update msi route Michael S. Tsirkin
2012-12-26 10:52 ` [Qemu-devel] [PATCH 5/8] virtio: backend virtqueue notifier masking Michael S. Tsirkin
2012-12-26 10:52 ` [Qemu-devel] [PATCH 8/8] vhost: backend masking support Michael S. Tsirkin
2012-12-26 10:52 ` [Qemu-devel] [PATCH 6/8] virtio-net: set/clear vhost_started in reverse order Michael S. Tsirkin
2012-12-26 10:52 ` [Qemu-devel] [PATCH 7/8] vhost: set started flag while start is in progress Michael S. Tsirkin
