* [Qemu-devel] [PATCH 0/8] virtio-pci: msix masking optimizations
From: Michael S. Tsirkin @ 2012-12-26 10:52 UTC
  To: qemu-devel, asias, stefanha

This patchset implements two msix masking optimizations.
It works fine for me, but I have not had time to do
performance testing yet, so I do not know
whether it helps and for which workloads.
Sending out now as I'll be on vacation for a week.
Please review and comment.

Thanks!

Michael S. Tsirkin (8):
  virtio: don't waste irqfds on control vqs
  msix: add api to access msix message
  kvm: add stub for update msi route
  virtio-pci: cache msix messages
  virtio: backend virtqueue notifier masking
  virtio-net: set/clear vhost_started in reverse order
  vhost: set started flag while start is in progress
  vhost: backend masking support

 hw/pci/msix.c   |   2 +-
 hw/pci/msix.h   |   1 +
 hw/vhost.c      | 112 ++++++++++++++++++++++++++-----
 hw/vhost.h      |  10 +++
 hw/vhost_net.c  |  27 +++++++-
 hw/vhost_net.h  |   3 +
 hw/virtio-net.c |  22 +++++-
 hw/virtio-pci.c | 203 ++++++++++++++++++++++++++++++++++++++++++++++++--------
 hw/virtio-pci.h |   2 +
 hw/virtio.h     |  15 ++++-
 kvm-stub.c      |   5 ++
 11 files changed, 350 insertions(+), 52 deletions(-)

-- 
MST

* [Qemu-devel] [PATCH 1/8] virtio: don't waste irqfds on control vqs
From: Michael S. Tsirkin @ 2012-12-26 10:52 UTC (permalink / raw)
  To: qemu-devel, asias, stefanha

Pass nvqs to set_guest_notifiers. This makes it possible to
save on irqfds by not allocating one for virtio-net's control vq.
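
For illustration, a minimal sketch of a caller (not part of this patch;
vhost-net is the intended user): a vhost-net device drives two data vqs,
so passing nvqs = 2 keeps the control vq (index 2, handled in userspace)
out of the irqfd setup.

    /* Sketch only: mirrors how vhost passes hdev->nvqs below. */
    int nvqs = 2; /* rx + tx; no guest notifier needed for the control vq */
    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, nvqs, true);
    if (r < 0) {
        fprintf(stderr, "Error binding guest notifiers: %d\n", -r);
    }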

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/vhost.c      | 10 +++++++---
 hw/virtio-pci.c | 19 ++++++++++++++-----
 hw/virtio-pci.h |  1 +
 hw/virtio.h     |  2 +-
 4 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/hw/vhost.c b/hw/vhost.c
index 4e1cb47..b6d73ca 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -879,7 +879,9 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         goto fail;
     }
 
-    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
+    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque,
+                                           hdev->nvqs,
+                                           true);
     if (r < 0) {
         fprintf(stderr, "Error binding guest notifier: %d\n", -r);
         goto fail_notifiers;
@@ -929,7 +931,7 @@ fail_vq:
     }
 fail_mem:
 fail_features:
-    vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
+    vdev->binding->set_guest_notifiers(vdev->binding_opaque, hdev->nvqs, false);
 fail_notifiers:
 fail:
     return r;
@@ -950,7 +952,9 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
         vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
                                 0, (hwaddr)~0x0ull);
     }
-    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
+    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque,
+                                           hdev->nvqs,
+                                           false);
     if (r < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
         fflush(stderr);
diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
index af9a56c..3c258a4 100644
--- a/hw/virtio-pci.c
+++ b/hw/virtio-pci.c
@@ -535,7 +535,7 @@ static int kvm_virtio_pci_vector_use(PCIDevice *dev, unsigned vector,
     VirtIODevice *vdev = proxy->vdev;
     int ret, queue_no;
 
-    for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
+    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
         }
@@ -565,7 +565,7 @@ static void kvm_virtio_pci_vector_release(PCIDevice *dev, unsigned vector)
     VirtIODevice *vdev = proxy->vdev;
     int queue_no;
 
-    for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
+    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
         }
@@ -587,7 +587,7 @@ static void kvm_virtio_pci_vector_poll(PCIDevice *dev,
     EventNotifier *notifier;
     VirtQueue *vq;
 
-    for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
+    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
         }
@@ -631,7 +631,7 @@ static bool virtio_pci_query_guest_notifiers(DeviceState *d)
     return msix_enabled(&proxy->pci_dev);
 }
 
-static int virtio_pci_set_guest_notifiers(DeviceState *d, bool assign)
+static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
 {
     VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
     VirtIODevice *vdev = proxy->vdev;
@@ -639,6 +639,15 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, bool assign)
     bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
         kvm_msi_via_irqfd_enabled();
 
+    nvqs = MIN(nvqs, VIRTIO_PCI_QUEUE_MAX);
+
+    /* When deassigning, pass a consistent nvqs value
+     * to avoid leaking notifiers.
+     */
+    assert(assign || nvqs == proxy->nvqs_with_notifiers);
+
+    proxy->nvqs_with_notifiers = nvqs;
+
     /* Must unset vector notifier while guest notifier is still assigned */
     if (proxy->vector_irqfd && !assign) {
         msix_unset_vector_notifiers(&proxy->pci_dev);
@@ -646,7 +655,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, bool assign)
         proxy->vector_irqfd = NULL;
     }
 
-    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
+    for (n = 0; n < nvqs; n++) {
         if (!virtio_queue_get_num(vdev, n)) {
             break;
         }
diff --git a/hw/virtio-pci.h b/hw/virtio-pci.h
index b58d9a2..b0f17e2 100644
--- a/hw/virtio-pci.h
+++ b/hw/virtio-pci.h
@@ -51,6 +51,7 @@ typedef struct {
     bool ioeventfd_disabled;
     bool ioeventfd_started;
     VirtIOIRQFD *vector_irqfd;
+    int nvqs_with_notifiers;
 } VirtIOPCIProxy;
 
 void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev);
diff --git a/hw/virtio.h b/hw/virtio.h
index 1dec9dc..329b426 100644
--- a/hw/virtio.h
+++ b/hw/virtio.h
@@ -99,7 +99,7 @@ typedef struct {
     int (*load_done)(DeviceState *d, QEMUFile *f);
     unsigned (*get_features)(DeviceState *d);
     bool (*query_guest_notifiers)(DeviceState *d);
-    int (*set_guest_notifiers)(DeviceState *d, bool assigned);
+    int (*set_guest_notifiers)(DeviceState *d, int nvqs, bool assigned);
     int (*set_host_notifier)(DeviceState *d, int n, bool assigned);
     void (*vmstate_change)(DeviceState *d, bool running);
 } VirtIOBindings;
-- 
MST

* [Qemu-devel] [PATCH 2/8] msix: add api to access msix message
From: Michael S. Tsirkin @ 2012-12-26 10:52 UTC (permalink / raw)
  To: qemu-devel, asias, stefanha

Will be used by virtio-pci.
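
A sketch of the intended call site (patch 4 in this series uses it this
way): fetch the message currently programmed into the MSI-X table for a
queue's vector before setting up the KVM route.

    /* Sketch: read the guest-programmed MSI-X entry for 'vector'
     * and hand it to the KVM routing code. */
    MSIMessage msg = msix_get_message(&proxy->pci_dev, vector);
    int virq = kvm_irqchip_add_msi_route(kvm_state, msg);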

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/pci/msix.c | 2 +-
 hw/pci/msix.h | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/hw/pci/msix.c b/hw/pci/msix.c
index 9eee657..e231a0d 100644
--- a/hw/pci/msix.c
+++ b/hw/pci/msix.c
@@ -27,7 +27,7 @@
 #define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
 #define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)
 
-static MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
+MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
 {
     uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
     MSIMessage msg;
diff --git a/hw/pci/msix.h b/hw/pci/msix.h
index d0c4429..e648410 100644
--- a/hw/pci/msix.h
+++ b/hw/pci/msix.h
@@ -5,6 +5,7 @@
 #include "hw/pci/pci.h"
 
 void msix_set_message(PCIDevice *dev, int vector, MSIMessage msg);
+MSIMessage msix_get_message(PCIDevice *dev, unsigned int vector);
 int msix_init(PCIDevice *dev, unsigned short nentries,
               MemoryRegion *table_bar, uint8_t table_bar_nr,
               unsigned table_offset, MemoryRegion *pba_bar,
-- 
MST

* [Qemu-devel] [PATCH 4/8] virtio-pci: cache msix messages
From: Michael S. Tsirkin @ 2012-12-26 10:52 UTC (permalink / raw)
  To: qemu-devel, asias, stefanha

Some guests mask a vector and then unmask it without changing it.
Cache the msix messages to avoid kvm system calls in this case.
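
Restated as a sketch, the check performed on unmask: only call into KVM
when the guest actually rewrote the MSI-X entry while the vector was
masked (where the cached message is refreshed is an assumption here).

    /* Sketch of the unmask-time cache check. */
    if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
        ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg);
        if (ret < 0) {
            return ret;
        }
        irqfd->msg = msg; /* assumption: remember the routed message */
    }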

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/virtio-pci.c | 119 ++++++++++++++++++++++++++++++++++++++++++++++----------
 hw/virtio-pci.h |   1 +
 2 files changed, 100 insertions(+), 20 deletions(-)

diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
index 3c258a4..1d77146 100644
--- a/hw/virtio-pci.c
+++ b/hw/virtio-pci.c
@@ -487,8 +487,6 @@ static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                         unsigned int vector,
                                         MSIMessage msg)
 {
-    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
-    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
     VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
     int ret;
 
@@ -500,18 +498,94 @@ static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
         irqfd->virq = ret;
     }
     irqfd->users++;
+    return 0;
+}
 
-    ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, irqfd->virq);
-    if (ret < 0) {
-        if (--irqfd->users == 0) {
-            kvm_irqchip_release_virq(kvm_state, irqfd->virq);
+static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
+                                             unsigned int vector)
+{
+    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
+    if (--irqfd->users == 0) {
+        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
+    }
+}
+
+static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+{
+    PCIDevice *dev = &proxy->pci_dev;
+    VirtIODevice *vdev = proxy->vdev;
+    unsigned int vector;
+    int ret, queue_no;
+    MSIMessage msg;
+
+    for (queue_no = 0; queue_no < nvqs; queue_no++) {
+        if (!virtio_queue_get_num(vdev, queue_no)) {
+            break;
+        }
+        vector = virtio_queue_vector(vdev, queue_no);
+        if (vector >= msix_nr_vectors_allocated(dev)) {
+            continue;
+        }
+        msg = msix_get_message(dev, vector);
+        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
+        if (ret < 0) {
+            goto undo;
         }
-        return ret;
     }
     return 0;
+
+undo:
+    while (--queue_no >= 0) {
+        vector = virtio_queue_vector(vdev, queue_no);
+        if (vector >= msix_nr_vectors_allocated(dev)) {
+            continue;
+        }
+        kvm_virtio_pci_vq_vector_release(proxy, vector);
+    }
+    return ret;
 }
 
-static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
+static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+{
+    PCIDevice *dev = &proxy->pci_dev;
+    VirtIODevice *vdev = proxy->vdev;
+    unsigned int vector;
+    int queue_no;
+
+    for (queue_no = 0; queue_no < nvqs; queue_no++) {
+        if (!virtio_queue_get_num(vdev, queue_no)) {
+            break;
+        }
+        vector = virtio_queue_vector(vdev, queue_no);
+        if (vector >= msix_nr_vectors_allocated(dev)) {
+            continue;
+        }
+        kvm_virtio_pci_vq_vector_release(proxy, vector);
+    }
+}
+
+static int kvm_virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
+                                        unsigned int queue_no,
+                                        unsigned int vector,
+                                        MSIMessage msg)
+{
+    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
+    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
+    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
+    int ret;
+
+    if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
+        ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, irqfd->virq);
+    return ret;
+}
+
+static void kvm_virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                              unsigned int queue_no,
                                              unsigned int vector)
 {
@@ -522,13 +596,9 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
 
     ret = kvm_irqchip_remove_irqfd_notifier(kvm_state, n, irqfd->virq);
     assert(ret == 0);
-
-    if (--irqfd->users == 0) {
-        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
-    }
 }
 
-static int kvm_virtio_pci_vector_use(PCIDevice *dev, unsigned vector,
+static int kvm_virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                      MSIMessage msg)
 {
     VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
@@ -542,7 +612,7 @@ static int kvm_virtio_pci_vector_use(PCIDevice *dev, unsigned vector,
         if (virtio_queue_vector(vdev, queue_no) != vector) {
             continue;
         }
-        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
+        ret = kvm_virtio_pci_vq_vector_unmask(proxy, queue_no, vector, msg);
         if (ret < 0) {
             goto undo;
         }
@@ -554,12 +624,12 @@ undo:
         if (virtio_queue_vector(vdev, queue_no) != vector) {
             continue;
         }
-        kvm_virtio_pci_vq_vector_release(proxy, queue_no, vector);
+        kvm_virtio_pci_vq_vector_mask(proxy, queue_no, vector);
     }
     return ret;
 }
 
-static void kvm_virtio_pci_vector_release(PCIDevice *dev, unsigned vector)
+static void kvm_virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
 {
     VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
     VirtIODevice *vdev = proxy->vdev;
@@ -572,7 +642,7 @@ static void kvm_virtio_pci_vector_release(PCIDevice *dev, unsigned vector)
         if (virtio_queue_vector(vdev, queue_no) != vector) {
             continue;
         }
-        kvm_virtio_pci_vq_vector_release(proxy, queue_no, vector);
+        kvm_virtio_pci_vq_vector_mask(proxy, queue_no, vector);
     }
 }
 
@@ -651,6 +721,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
     /* Must unset vector notifier while guest notifier is still assigned */
     if (proxy->vector_irqfd && !assign) {
         msix_unset_vector_notifiers(&proxy->pci_dev);
+        kvm_virtio_pci_vector_release(proxy, nvqs);
         g_free(proxy->vector_irqfd);
         proxy->vector_irqfd = NULL;
     }
@@ -672,17 +743,25 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
         proxy->vector_irqfd =
             g_malloc0(sizeof(*proxy->vector_irqfd) *
                       msix_nr_vectors_allocated(&proxy->pci_dev));
+        r = kvm_virtio_pci_vector_use(proxy, nvqs);
+        if (r < 0) {
+            goto assign_error;
+        }
         r = msix_set_vector_notifiers(&proxy->pci_dev,
-                                      kvm_virtio_pci_vector_use,
-                                      kvm_virtio_pci_vector_release,
+                                      kvm_virtio_pci_vector_unmask,
+                                      kvm_virtio_pci_vector_mask,
                                       kvm_virtio_pci_vector_poll);
         if (r < 0) {
-            goto assign_error;
+            goto notifiers_error;
         }
     }
 
     return 0;
 
+notifiers_error:
+    assert(assign);
+    kvm_virtio_pci_vector_release(proxy, nvqs);
+
 assign_error:
     /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
     assert(assign);
diff --git a/hw/virtio-pci.h b/hw/virtio-pci.h
index b0f17e2..9ff3139 100644
--- a/hw/virtio-pci.h
+++ b/hw/virtio-pci.h
@@ -27,6 +27,7 @@
 #define VIRTIO_PCI_FLAG_USE_IOEVENTFD   (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
 
 typedef struct {
+    MSIMessage msg;
     int virq;
     unsigned int users;
 } VirtIOIRQFD;
-- 
MST

* [Qemu-devel] [PATCH 3/8] kvm: add stub for update msi route
From: Michael S. Tsirkin @ 2012-12-26 10:52 UTC (permalink / raw)
  To: qemu-devel, asias, stefanha

Will be used by virtio-pci.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 kvm-stub.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/kvm-stub.c b/kvm-stub.c
index 5b97152..81f8967 100644
--- a/kvm-stub.c
+++ b/kvm-stub.c
@@ -131,6 +131,11 @@ void kvm_irqchip_release_virq(KVMState *s, int virq)
 {
 }
 
+int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
+{
+    return -ENOSYS;
+}
+
 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
 {
     return -ENOSYS;
-- 
MST

* [Qemu-devel] [PATCH 5/8] virtio: backend virtqueue notifier masking
From: Michael S. Tsirkin @ 2012-12-26 10:52 UTC (permalink / raw)
  To: qemu-devel, asias, stefanha

Some backends (notably vhost) can mask events
at their source in a way that is more efficient
than masking through kvm. Specifically:
- masking in kvm uses the rcu write side, so it has high latency
- on unmask, kvm always sends an interrupt
Masking at the source has neither of these issues.

Add such support in virtio.h and use it in virtio-pci.
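
As a sketch of what a backend hook pair could look like (the names below
are illustrative only; patch 8 adds the real vhost-net implementation):

    /* Illustrative callbacks: when masked, the backend reroutes its
     * interrupts to a private notifier instead of injecting them;
     * pending() lets the frontend recover events that fired while
     * masked.  MyBackend and the my_* helpers are hypothetical. */
    static void my_guest_notifier_mask(VirtIODevice *vdev, int n, bool mask)
    {
        MyBackend *b = to_my_backend(vdev);
        my_backend_reroute_interrupts(b, n, mask);
    }

    static bool my_guest_notifier_pending(VirtIODevice *vdev, int n)
    {
        MyBackend *b = to_my_backend(vdev);
        return event_notifier_test_and_clear(&b->masked_notifier[n]);
    }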

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/virtio-pci.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++-------
 hw/virtio.h     | 13 ++++++++++
 2 files changed, 83 insertions(+), 9 deletions(-)

diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
index 1d77146..aad9d6d 100644
--- a/hw/virtio-pci.c
+++ b/hw/virtio-pci.c
@@ -510,6 +510,31 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
     }
 }
 
+static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
+                                 unsigned int queue_no,
+                                 unsigned int vector)
+{
+    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
+    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
+    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
+    int ret;
+    ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, irqfd->virq);
+    return ret;
+}
+
+static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
+                                      unsigned int queue_no,
+                                      unsigned int vector)
+{
+    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
+    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
+    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
+    int ret;
+
+    ret = kvm_irqchip_remove_irqfd_notifier(kvm_state, n, irqfd->virq);
+    assert(ret == 0);
+}
+
 static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
 {
     PCIDevice *dev = &proxy->pci_dev;
@@ -531,6 +556,16 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
         if (ret < 0) {
             goto undo;
         }
+        /* If guest supports masking, set up irqfd now.
+         * Otherwise, delay until unmasked in the frontend.
+         */
+        if (proxy->vdev->guest_notifier_mask) {
+            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+            if (ret < 0) {
+                kvm_virtio_pci_vq_vector_release(proxy, vector);
+                goto undo;
+            }
+        }
     }
     return 0;
 
@@ -540,6 +575,9 @@ undo:
         if (vector >= msix_nr_vectors_allocated(dev)) {
             continue;
         }
+        if (proxy->vdev->guest_notifier_mask) {
+            kvm_virtio_pci_irqfd_release(proxy, vector, queue_no);
+        }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
     }
     return ret;
@@ -560,6 +598,12 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
         if (vector >= msix_nr_vectors_allocated(dev)) {
             continue;
         }
+        /* If guest supports masking, clean up irqfd now.
+         * Otherwise, it was cleaned when masked in the frontend.
+         */
+        if (proxy->vdev->guest_notifier_mask) {
+            kvm_virtio_pci_irqfd_release(proxy, vector, queue_no);
+        }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
     }
 }
@@ -581,7 +625,19 @@ static int kvm_virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
         }
     }
 
-    ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, irqfd->virq);
+    /* If guest supports masking, irqfd is already setup, unmask it.
+     * Otherwise, set it up now.
+     */
+    if (proxy->vdev->guest_notifier_mask) {
+        proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, false);
+        /* Test after unmasking to avoid losing events. */
+        if (proxy->vdev->guest_notifier_pending &&
+            proxy->vdev->guest_notifier_pending(proxy->vdev, queue_no)) {
+            event_notifier_set(n);
+        }
+    } else {
+        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+    }
     return ret;
 }
 
@@ -589,13 +645,14 @@ static void kvm_virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                              unsigned int queue_no,
                                              unsigned int vector)
 {
-    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
-    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
-    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
-    int ret;
-
-    ret = kvm_irqchip_remove_irqfd_notifier(kvm_state, n, irqfd->virq);
-    assert(ret == 0);
+    /* If guest supports masking, keep irqfd but mask it.
+     * Otherwise, clean it up now.
+     */ 
+    if (proxy->vdev->guest_notifier_mask) {
+        proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, true);
+    } else {
+        kvm_virtio_pci_irqfd_release(proxy, vector, queue_no);
+    }
 }
 
 static int kvm_virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
@@ -668,7 +725,11 @@ static void kvm_virtio_pci_vector_poll(PCIDevice *dev,
         }
         vq = virtio_get_queue(vdev, queue_no);
         notifier = virtio_queue_get_guest_notifier(vq);
-        if (event_notifier_test_and_clear(notifier)) {
+        if (vdev->guest_notifier_pending) {
+            if (vdev->guest_notifier_pending(vdev, queue_no)) {
+                msix_set_pending(dev, vector);
+            }
+        } else if (event_notifier_test_and_clear(notifier)) {
             msix_set_pending(dev, vector);
         }
     }
diff --git a/hw/virtio.h b/hw/virtio.h
index 329b426..b9f1873 100644
--- a/hw/virtio.h
+++ b/hw/virtio.h
@@ -126,6 +126,19 @@ struct VirtIODevice
     void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
     void (*reset)(VirtIODevice *vdev);
     void (*set_status)(VirtIODevice *vdev, uint8_t val);
+    /* Test and clear event pending status.
+     * Should be called after unmask to avoid losing events.
+     * If backend does not support masking,
+     * must check in frontend instead.
+     */
+    bool (*guest_notifier_pending)(VirtIODevice *vdev, int n);
+    /* Mask/unmask events from this vq. Any events reported
+     * while masked will become pending.
+     * If backend does not support masking,
+     * must mask in frontend instead.
+     */
+    void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask);
+
     VirtQueue *vq;
     const VirtIOBindings *binding;
     DeviceState *binding_opaque;
-- 
MST

* [Qemu-devel] [PATCH 8/8] vhost: backend masking support
From: Michael S. Tsirkin @ 2012-12-26 10:52 UTC (permalink / raw)
  To: qemu-devel, asias, stefanha

Support backend guest notifier masking in vhost-net:
create an eventfd at device init time and, when a queue
is masked, have vhost signal that eventfd instead of
injecting an interrupt into the guest.
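
The core trick, as a sketch (it mirrors vhost_virtqueue_mask below):
masking is just a VHOST_SET_VRING_CALL ioctl that points the call
eventfd either at a private notifier or at the guest notifier/irqfd.

    /* Sketch: masked -> vhost signals the private notifier and nothing
     * reaches the guest; unmasked -> vhost signals the guest notifier
     * (irqfd) directly. */
    struct vhost_vring_file file = { .index = n };
    file.fd = mask ? event_notifier_get_fd(&hdev->vqs[n].masked_notifier)
                   : event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(hdev->control, VHOST_SET_VRING_CALL, &file);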

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/vhost.c      | 95 +++++++++++++++++++++++++++++++++++++++++++++++++--------
 hw/vhost.h      | 10 ++++++
 hw/vhost_net.c  | 27 ++++++++++++++--
 hw/vhost_net.h  |  3 ++
 hw/virtio-net.c | 18 +++++++++++
 5 files changed, 137 insertions(+), 16 deletions(-)

diff --git a/hw/vhost.c b/hw/vhost.c
index 4fa5007..cee8aad 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -612,7 +612,7 @@ static void vhost_log_stop(MemoryListener *listener,
     /* FIXME: implement */
 }
 
-static int vhost_virtqueue_init(struct vhost_dev *dev,
+static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
@@ -681,16 +681,11 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
         goto fail_kick;
     }
 
-    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
-    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
-    if (r) {
-        r = -errno;
-        goto fail_call;
-    }
+    /* Clear and discard previous events if any. */
+    event_notifier_test_and_clear(&vq->masked_notifier);
 
     return 0;
 
-fail_call:
 fail_kick:
 fail_alloc:
     cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
@@ -708,7 +703,7 @@ fail_alloc_desc:
     return r;
 }
 
-static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
+static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                     struct VirtIODevice *vdev,
                                     struct vhost_virtqueue *vq,
                                     unsigned idx)
@@ -746,11 +741,39 @@ static void vhost_eventfd_del(MemoryListener *listener,
 {
 }
 
+static int vhost_virtqueue_init(struct vhost_dev *dev,
+                                struct vhost_virtqueue *vq, int n)
+{
+    struct vhost_vring_file file = {
+        .index = n,
+    };
+    int r = event_notifier_init(&vq->masked_notifier, 0);
+    if (r < 0) {
+        return r;
+    }
+
+    file.fd = event_notifier_get_fd(&vq->masked_notifier);
+    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
+    if (r) {
+        r = -errno;
+        goto fail_call;
+    }
+    return 0;
+fail_call:
+    event_notifier_cleanup(&vq->masked_notifier);
+    return r;
+}
+
+static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
+{
+    event_notifier_cleanup(&vq->masked_notifier);
+}
+
 int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
                    bool force)
 {
     uint64_t features;
-    int r;
+    int i, r;
     if (devfd >= 0) {
         hdev->control = devfd;
     } else {
@@ -768,6 +791,13 @@ int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
     if (r < 0) {
         goto fail;
     }
+
+    for (i = 0; i < hdev->nvqs; ++i) {
+        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
+        if (r < 0) {
+            goto fail_vq;
+        }
+    }
     hdev->features = features;
 
     hdev->memory_listener = (MemoryListener) {
@@ -795,6 +825,10 @@ int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
     memory_listener_register(&hdev->memory_listener, &address_space_memory);
     hdev->force = force;
     return 0;
+fail_vq:
+    while (--i >= 0) {
+        vhost_virtqueue_cleanup(hdev->vqs + i);
+    }
 fail:
     r = -errno;
     close(hdev->control);
@@ -803,6 +837,10 @@ fail:
 
 void vhost_dev_cleanup(struct vhost_dev *hdev)
 {
+    int i;
+    for (i = 0; i < hdev->nvqs; ++i) {
+        vhost_virtqueue_cleanup(hdev->vqs + i);
+    }
     memory_listener_unregister(&hdev->memory_listener);
     g_free(hdev->mem);
     g_free(hdev->mem_sections);
@@ -869,6 +907,37 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     }
 }
 
+/* Test and clear event pending status.
+ * Should be called after unmask to avoid losing events.
+ */
+bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
+{
+    struct vhost_virtqueue *vq = hdev->vqs + n;
+    assert(hdev->started);
+    return event_notifier_test_and_clear(&vq->masked_notifier);
+}
+
+/* Mask/unmask events from this vq. */
+void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
+                         bool mask)
+{
+    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
+    int r;
+
+    assert(hdev->started);
+
+    struct vhost_vring_file file = {
+        .index = n,
+    };
+    if (mask) {
+        file.fd = event_notifier_get_fd(&hdev->vqs[n].masked_notifier);
+    } else {
+        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
+    }
+    r = ioctl(hdev->control, VHOST_SET_VRING_CALL, &file);
+    assert(r >= 0);
+}
+
 /* Host notifiers must be enabled at this point. */
 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
@@ -900,7 +969,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         goto fail_mem;
     }
     for (i = 0; i < hdev->nvqs; ++i) {
-        r = vhost_virtqueue_init(hdev,
+        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  i);
@@ -925,7 +994,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
 fail_log:
 fail_vq:
     while (--i >= 0) {
-        vhost_virtqueue_cleanup(hdev,
+        vhost_virtqueue_stop(hdev,
                                 vdev,
                                 hdev->vqs + i,
                                 i);
@@ -946,7 +1015,7 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
     int i, r;
 
     for (i = 0; i < hdev->nvqs; ++i) {
-        vhost_virtqueue_cleanup(hdev,
+        vhost_virtqueue_stop(hdev,
                                 vdev,
                                 hdev->vqs + i,
                                 i);
diff --git a/hw/vhost.h b/hw/vhost.h
index 6f6a906..44c61a5 100644
--- a/hw/vhost.h
+++ b/hw/vhost.h
@@ -18,6 +18,7 @@ struct vhost_virtqueue {
     void *ring;
     unsigned long long ring_phys;
     unsigned ring_size;
+    EventNotifier masked_notifier;
 };
 
 typedef unsigned long vhost_log_chunk_t;
@@ -53,4 +54,13 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
 
+/* Test and clear masked event pending status.
+ * Should be called after unmask to avoid losing events.
+ */
+bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);
+
+/* Mask/unmask events from this vq.
+ */
+void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
+                          bool mask);
 #endif
diff --git a/hw/vhost_net.c b/hw/vhost_net.c
index ae2785d..d3a04ca 100644
--- a/hw/vhost_net.c
+++ b/hw/vhost_net.c
@@ -109,6 +109,9 @@ struct vhost_net *vhost_net_init(NetClientState *backend, int devfd,
         (1 << VHOST_NET_F_VIRTIO_NET_HDR);
     net->backend = r;
 
+    net->dev.nvqs = 2;
+    net->dev.vqs = net->vqs;
+
     r = vhost_dev_init(&net->dev, devfd, "/dev/vhost-net", force);
     if (r < 0) {
         goto fail;
@@ -143,9 +146,6 @@ int vhost_net_start(struct vhost_net *net,
     struct vhost_vring_file file = { };
     int r;
 
-    net->dev.nvqs = 2;
-    net->dev.vqs = net->vqs;
-
     r = vhost_dev_enable_notifiers(&net->dev, dev);
     if (r < 0) {
         goto fail_notifiers;
@@ -200,6 +200,17 @@ void vhost_net_cleanup(struct vhost_net *net)
     vhost_dev_cleanup(&net->dev);
     g_free(net);
 }
+
+bool vhost_net_virtqueue_pending(VHostNetState *net, int idx)
+{
+    return vhost_virtqueue_pending(&net->dev, idx);
+}
+
+void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
+                              int idx, bool mask)
+{
+    vhost_virtqueue_mask(&net->dev, dev, idx, mask);
+}
 #else
 struct vhost_net *vhost_net_init(NetClientState *backend, int devfd,
                                  bool force)
@@ -234,4 +245,14 @@ unsigned vhost_net_get_features(struct vhost_net *net, unsigned features)
 void vhost_net_ack_features(struct vhost_net *net, unsigned features)
 {
 }
+
+bool vhost_net_virtqueue_pending(VHostNetState *net, int idx)
+{
+    return -ENOSYS;
+}
+
+void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
+                              int idx, bool mask)
+{
+}
 #endif
diff --git a/hw/vhost_net.h b/hw/vhost_net.h
index 012aba4..88912b8 100644
--- a/hw/vhost_net.h
+++ b/hw/vhost_net.h
@@ -17,4 +17,7 @@ void vhost_net_cleanup(VHostNetState *net);
 unsigned vhost_net_get_features(VHostNetState *net, unsigned features);
 void vhost_net_ack_features(VHostNetState *net, unsigned features);
 
+bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
+void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
+                              int idx, bool mask);
 #endif
diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index b756d57..3bb01b1 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -1010,6 +1010,22 @@ static NetClientInfo net_virtio_info = {
     .link_status_changed = virtio_net_set_link_status,
 };
 
+static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
+{
+    VirtIONet *n = to_virtio_net(vdev);
+    assert(n->vhost_started);
+    return vhost_net_virtqueue_pending(tap_get_vhost_net(n->nic->nc.peer), idx);
+}
+
+static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
+                                           bool mask)
+{
+    VirtIONet *n = to_virtio_net(vdev);
+    assert(n->vhost_started);
+    vhost_net_virtqueue_mask(tap_get_vhost_net(n->nic->nc.peer),
+                             vdev, idx, mask);
+}
+
 VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
                               virtio_net_conf *net)
 {
@@ -1026,6 +1042,8 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
     n->vdev.bad_features = virtio_net_bad_features;
     n->vdev.reset = virtio_net_reset;
     n->vdev.set_status = virtio_net_set_status;
+    n->vdev.guest_notifier_mask = virtio_net_guest_notifier_mask;
+    n->vdev.guest_notifier_pending = virtio_net_guest_notifier_pending;
     n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
 
     if (net->tx && strcmp(net->tx, "timer") && strcmp(net->tx, "bh")) {
-- 
MST

* [Qemu-devel] [PATCH 6/8] virtio-net: set/clear vhost_started in reverse order
From: Michael S. Tsirkin @ 2012-12-26 10:52 UTC (permalink / raw)
  To: qemu-devel, asias, stefanha

As vhost_started is cleared as the last thing on stop,
set it as the first thing on start. This makes it
possible to use vhost_started while start is still in
progress, which follow-up patches rely on.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/virtio-net.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 5d03b31..b756d57 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -126,12 +126,12 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
         if (!vhost_net_query(tap_get_vhost_net(n->nic->nc.peer), &n->vdev)) {
             return;
         }
+        n->vhost_started = 1;
         r = vhost_net_start(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
         if (r < 0) {
             error_report("unable to start vhost net: %d: "
                          "falling back on userspace virtio", -r);
-        } else {
-            n->vhost_started = 1;
+            n->vhost_started = 0;
         }
     } else {
         vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
-- 
MST

* [Qemu-devel] [PATCH 7/8] vhost: set started flag while start is in progress
From: Michael S. Tsirkin @ 2012-12-26 10:52 UTC (permalink / raw)
  To: qemu-devel, asias, stefanha

This makes it possible to use the started flag for sanity
checking in callbacks that run while start/stop is in progress.
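
For example (a sketch; patch 8 adds checks of this form), a callback that
is only valid while vhost is running can now assert on the flag even if
it fires before vhost_dev_start() has fully completed:

    /* Sketch: sanity check in a callback that may run while start/stop
     * is still in progress. */
    bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
    {
        assert(hdev->started);
        return event_notifier_test_and_clear(&hdev->vqs[n].masked_notifier);
    }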

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/vhost.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/hw/vhost.c b/hw/vhost.c
index b6d73ca..4fa5007 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -873,6 +873,9 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
     int i, r;
+
+    hdev->started = true;
+
     if (!vdev->binding->set_guest_notifiers) {
         fprintf(stderr, "binding does not support guest notifiers\n");
         r = -ENOSYS;
@@ -918,8 +921,6 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         }
     }
 
-    hdev->started = true;
-
     return 0;
 fail_log:
 fail_vq:
@@ -934,6 +935,8 @@ fail_features:
     vdev->binding->set_guest_notifiers(vdev->binding_opaque, hdev->nvqs, false);
 fail_notifiers:
 fail:
+
+    hdev->started = false;
     return r;
 }
 
-- 
MST

* Re: [Qemu-devel] [PATCH 1/8] virtio: don't waste irqfds on control vqs
From: Stefan Hajnoczi @ 2013-01-03 11:51 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: asias, qemu-devel

On Wed, Dec 26, 2012 at 12:52:19PM +0200, Michael S. Tsirkin wrote:
> Pass nvqs to set_guest_notifiers. This makes it possible to
> save on irqfds by not allocating one for the control vq
> for virtio-net.
> 
> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> ---
>  hw/vhost.c      | 10 +++++++---
>  hw/virtio-pci.c | 19 ++++++++++++++-----
>  hw/virtio-pci.h |  1 +
>  hw/virtio.h     |  2 +-
>  4 files changed, 23 insertions(+), 9 deletions(-)

Good idea!

This patch assumes only the first n virtqueues need irqfd.  Depending on
the device's virtqueue ordering this may not work (e.g. control vq first
followed by data vqs).  But this is a theoretical point, let's wait
until we hit it and need something fancier.

Stefan
