From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
	Jason Wang <jasowang@redhat.com>,
	Victor Kaplansky <victork@redhat.com>,
	Didier Pallard <didier.pallard@6wind.com>
Subject: [Qemu-devel] [PULL 12/13] vhost-user interrupt management fixes
Date: Fri, 19 Feb 2016 10:00:49 +0200
Message-ID: <1455868726-26350-13-git-send-email-mst@redhat.com>
In-Reply-To: <1455868726-26350-1-git-send-email-mst@redhat.com>

From: Victor Kaplansky <victork@redhat.com>

Since guest_notifier_mask cannot be used in vhost-user mode, due to
the buffering implied by the unix control socket, force
use_guest_notifier_mask off on the virtio devices of vhost-user
interfaces, and send the correct callfd to the guest at vhost start.
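
For context, a minimal sketch (using names from the hunks below) of
what sending the correct callfd at vhost start amounts to: when
masking is disabled, vhost_virtqueue_start() immediately hands the
backend the real guest notifier rather than the masked one.

    /* In vhost_virtqueue_start(): init the vring unmasked, unless
     * guest_notifier_mask() will do it later. */
    if (!vdev->use_guest_notifier_mask) {
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    /* ... and with mask == false, vhost_virtqueue_mask() sends the
     * guest notifier fd itself as the callfd: */
    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));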

Using the guest_notifier_mask function in the vhost-user case may
break the interrupt mask paradigm, because the mask/unmask has not
actually taken effect when the guest_notifier_mask call returns;
instead, a message is posted on a unix socket and processed later.
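
As an illustration (a sketch assuming the VhostOps backend interface,
not code from this patch), the deferred mask looks roughly like:

    struct vhost_vring_file file = {
        .index = index,
        .fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier),
    };
    /* For vhost-user this only queues a VHOST_USER_SET_VRING_CALL
     * message on the control socket; the backend applies it whenever
     * it next reads the socket, so the vring is not yet masked when
     * this call returns. */
    hdev->vhost_ops->vhost_set_vring_call(hdev, &file);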

Add an optional boolean flag 'use_guest_notifier_mask' to disable the
use of guest_notifier_mask in virtio-pci.
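
The resulting pattern at each virtio-pci mask/unmask site (taken from
the hunks below) is to consult the new flag before routing through
guest_notifier_mask(), falling back to handling the irqfd directly:

    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }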

Signed-off-by: Didier Pallard <didier.pallard@6wind.com>
Signed-off-by: Victor Kaplansky <victork@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 include/hw/virtio/virtio.h |  1 +
 hw/net/vhost_net.c         | 15 +++++++++++++--
 hw/virtio/vhost.c          |  9 +++++++++
 hw/virtio/virtio-pci.c     | 14 ++++++++------
 hw/virtio/virtio.c         |  1 +
 5 files changed, 32 insertions(+), 8 deletions(-)

diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 108cdb0..c38a2fe 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -90,6 +90,7 @@ struct VirtIODevice
     VMChangeStateEntry *vmstate;
     char *bus_name;
     uint8_t device_endian;
+    bool use_guest_notifier_mask;
     QLIST_HEAD(, VirtQueue) *vector_queues;
 };
 
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index b242832..6e1032f 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -284,8 +284,19 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
     }
 
     for (i = 0; i < total_queues; i++) {
-        vhost_net_set_vq_index(get_vhost_net(ncs[i].peer), i * 2);
-    }
+        struct vhost_net *net;
+
+        net = get_vhost_net(ncs[i].peer);
+        vhost_net_set_vq_index(net, i * 2);
+
+        /* Suppress the masking of guest notifiers on vhost-user,
+         * because vhost-user does not handle interrupt
+         * masking/unmasking properly.
+         */
+        if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+            dev->use_guest_notifier_mask = false;
+        }
+    }
 
     r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
     if (r < 0) {
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 9f8ac38..72d0c9e 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -875,6 +875,14 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
     /* Clear and discard previous events if any. */
     event_notifier_test_and_clear(&vq->masked_notifier);
 
+    /* Init vring in unmasked state, unless guest_notifier_mask
+     * will do it later.
+     */
+    if (!vdev->use_guest_notifier_mask) {
+        /* TODO: check and handle errors. */
+        vhost_virtqueue_mask(dev, vdev, idx, false);
+    }
+
     return 0;
 
 fail_kick:
@@ -1167,6 +1175,7 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
     struct vhost_vring_file file;
 
     if (mask) {
+        assert(vdev->use_guest_notifier_mask);
         file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
     } else {
         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 5494ff4..440776c 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
         /* If guest supports masking, set up irqfd now.
          * Otherwise, delay until unmasked in the frontend.
          */
-        if (k->guest_notifier_mask) {
+        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
             ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
             if (ret < 0) {
                 kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -822,7 +822,7 @@ undo:
         if (vector >= msix_nr_vectors_allocated(dev)) {
             continue;
         }
-        if (k->guest_notifier_mask) {
+        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
             kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
         /* If guest supports masking, clean up irqfd now.
          * Otherwise, it was cleaned when masked in the frontend.
          */
-        if (k->guest_notifier_mask) {
+        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
             kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
     /* If guest supports masking, irqfd is already setup, unmask it.
      * Otherwise, set it up now.
      */
-    if (k->guest_notifier_mask) {
+    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, false);
         /* Test after unmasking to avoid losing events. */
         if (k->guest_notifier_pending &&
@@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
     /* If guest supports masking, keep irqfd but mask it.
      * Otherwise, clean it up now.
      */ 
-    if (k->guest_notifier_mask) {
+    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, true);
     } else {
         kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
@@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
         event_notifier_cleanup(notifier);
     }
 
-    if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
+    if (!msix_enabled(&proxy->pci_dev) &&
+        vdev->use_guest_notifier_mask &&
+        vdc->guest_notifier_mask) {
         vdc->guest_notifier_mask(vdev, n, !assign);
     }
 
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 90f2545..e365960 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -1677,6 +1677,7 @@ void virtio_init(VirtIODevice *vdev, const char *name,
     vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                      vdev);
     vdev->device_endian = virtio_default_endian();
+    vdev->use_guest_notifier_mask = true;
 }
 
 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
-- 
MST
