From: Jason Wang <jasowang@redhat.com>
To: mst@redhat.com, aliguori@us.ibm.com, stefanha@redhat.com,
	qemu-devel@nongnu.org
Cc: krkumar2@in.ibm.com, kvm@vger.kernel.org, mprivozn@redhat.com,
	Jason Wang <jasowang@redhat.com>,
	rusty@rustcorp.com.au, jwhan@filewood.snu.ac.kr,
	shiyer@redhat.com
Subject: [Qemu-devel] [PATCH 06/12] vhost: multiqueue support
Date: Fri, 28 Dec 2012 18:31:58 +0800
Message-ID: <1356690724-37891-7-git-send-email-jasowang@redhat.com>
In-Reply-To: <1356690724-37891-1-git-send-email-jasowang@redhat.com>

This patch adds multiqueue support to vhost. The idea is simple: launch
multiple vhost threads and let each thread process a subset of the device's
virtqueues.
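
As an illustration only (this loop is not part of the patch, and
get_vhost_net_for_pair() as well as queue_pairs are made-up names), a
multiqueue backend could start one vhost device, and therefore one vhost
thread, per queue pair, each owning two virtqueues:

    /* Sketch: each vhost_net instance has nvqs == 2, so queue pair i
     * serves the device-wide virtqueues 2*i (rx) and 2*i + 1 (tx). */
    for (i = 0; i < queue_pairs; i++) {
        r = vhost_net_start(get_vhost_net_for_pair(n, i), &n->vdev, i * 2);
        if (r < 0) {
            goto err;
        }
    }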

The only change needed is to pass a virtqueue index when starting a vhost
device; it records the first virtqueue that this vhost thread serves.
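
Concretely (a sketch of the arithmetic introduced below, not additional
code), a device-wide virtqueue index is translated into the per-device
index that the vhost kernel backend expects:

    /* i-th virtqueue handled by this vhost device, device-wide numbering */
    idx = hdev->vq_index + i;
    /* the vhost backend only knows about its own dev->nvqs rings */
    vhost_vq_index = idx % dev->nvqs;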

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/vhost.c      |   52 +++++++++++++++++++++++++++++++++-------------------
 hw/vhost.h      |    2 ++
 hw/vhost_net.c  |    7 +++++--
 hw/vhost_net.h  |    2 +-
 hw/virtio-net.c |    3 ++-
 5 files changed, 43 insertions(+), 23 deletions(-)

diff --git a/hw/vhost.c b/hw/vhost.c
index 16322a1..63c76d6 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -619,11 +619,12 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
 {
     hwaddr s, l, a;
     int r;
+    int vhost_vq_index = idx % dev->nvqs;
     struct vhost_vring_file file = {
-        .index = idx,
+        .index = vhost_vq_index
     };
     struct vhost_vring_state state = {
-        .index = idx,
+        .index = vhost_vq_index
     };
     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
 
@@ -669,11 +670,12 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
         goto fail_alloc_ring;
     }
 
-    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
+    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
     if (r < 0) {
         r = -errno;
         goto fail_alloc;
     }
+
     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
     r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
     if (r) {
@@ -714,7 +716,7 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                     unsigned idx)
 {
     struct vhost_vring_state state = {
-        .index = idx,
+        .index = idx % dev->nvqs,
     };
     int r;
     r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
@@ -829,7 +831,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     }
 
     for (i = 0; i < hdev->nvqs; ++i) {
-        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, true);
+        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
+                                             hdev->vq_index + i,
+                                             true);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
             goto fail_vq;
@@ -839,7 +843,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     return 0;
 fail_vq:
     while (--i >= 0) {
-        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
+        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
+                                             hdev->vq_index + i,
+                                             false);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
             fflush(stderr);
@@ -860,7 +866,9 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     int i, r;
 
     for (i = 0; i < hdev->nvqs; ++i) {
-        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
+        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
+                                             hdev->vq_index + i,
+                                             false);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
             fflush(stderr);
@@ -879,10 +887,12 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         goto fail;
     }
 
-    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
-    if (r < 0) {
-        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
-        goto fail_notifiers;
+    if (hdev->vq_index == 0) {
+        r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
+        if (r < 0) {
+            fprintf(stderr, "Error binding guest notifier: %d\n", -r);
+            goto fail_notifiers;
+        }
     }
 
     r = vhost_dev_set_features(hdev, hdev->log_enabled);
@@ -898,7 +908,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         r = vhost_virtqueue_init(hdev,
                                  vdev,
                                  hdev->vqs + i,
-                                 i);
+                                 hdev->vq_index + i);
         if (r < 0) {
             goto fail_vq;
         }
@@ -925,8 +935,9 @@ fail_vq:
         vhost_virtqueue_cleanup(hdev,
                                 vdev,
                                 hdev->vqs + i,
-                                i);
+                                hdev->vq_index + i);
     }
+    i = hdev->nvqs;
 fail_mem:
 fail_features:
     vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
@@ -944,21 +955,24 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
         vhost_virtqueue_cleanup(hdev,
                                 vdev,
                                 hdev->vqs + i,
-                                i);
+                                hdev->vq_index + i);
     }
     for (i = 0; i < hdev->n_mem_sections; ++i) {
         vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
                                 0, (hwaddr)~0x0ull);
     }
-    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
-    if (r < 0) {
-        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
-        fflush(stderr);
+    if (hdev->vq_index == 0) {
+        r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
+        if (r < 0) {
+            fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
+            fflush(stderr);
+        }
+        assert (r >= 0);
     }
-    assert (r >= 0);
 
     hdev->started = false;
     g_free(hdev->log);
     hdev->log = NULL;
     hdev->log_size = 0;
 }
+
diff --git a/hw/vhost.h b/hw/vhost.h
index 0c47229..e94a9f7 100644
--- a/hw/vhost.h
+++ b/hw/vhost.h
@@ -34,6 +34,8 @@ struct vhost_dev {
     MemoryRegionSection *mem_sections;
     struct vhost_virtqueue *vqs;
     int nvqs;
+    /* the first virtqueue which would be used by this vhost dev */
+    int vq_index;
     unsigned long long features;
     unsigned long long acked_features;
     unsigned long long backend_features;
diff --git a/hw/vhost_net.c b/hw/vhost_net.c
index 8241601..cdb294c 100644
--- a/hw/vhost_net.c
+++ b/hw/vhost_net.c
@@ -138,13 +138,15 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
 }
 
 int vhost_net_start(struct vhost_net *net,
-                    VirtIODevice *dev)
+                    VirtIODevice *dev,
+                    int vq_index)
 {
     struct vhost_vring_file file = { };
     int r;
 
     net->dev.nvqs = 2;
     net->dev.vqs = net->vqs;
+    net->dev.vq_index = vq_index;
 
     r = vhost_dev_enable_notifiers(&net->dev, dev);
     if (r < 0) {
@@ -214,7 +216,8 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
 }
 
 int vhost_net_start(struct vhost_net *net,
-		    VirtIODevice *dev)
+                    VirtIODevice *dev,
+                    int vq_index)
 {
     return -ENOSYS;
 }
diff --git a/hw/vhost_net.h b/hw/vhost_net.h
index a9db234..c9a8429 100644
--- a/hw/vhost_net.h
+++ b/hw/vhost_net.h
@@ -9,7 +9,7 @@ typedef struct vhost_net VHostNetState;
 VHostNetState *vhost_net_init(NetClientState *backend, int devfd, bool force);
 
 bool vhost_net_query(VHostNetState *net, VirtIODevice *dev);
-int vhost_net_start(VHostNetState *net, VirtIODevice *dev);
+int vhost_net_start(VHostNetState *net, VirtIODevice *dev, int vq_index);
 void vhost_net_stop(VHostNetState *net, VirtIODevice *dev);
 
 void vhost_net_cleanup(VHostNetState *net);
diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index d57a5a5..70bc0e6 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -126,7 +126,8 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
         if (!vhost_net_query(tap_get_vhost_net(qemu_get_queue(n->nic)->peer), &n->vdev)) {
             return;
         }
-        r = vhost_net_start(tap_get_vhost_net(qemu_get_queue(n->nic)->peer), &n->vdev);
+        r = vhost_net_start(tap_get_vhost_net(qemu_get_queue(n->nic)->peer),
+                            &n->vdev, 0);
         if (r < 0) {
             error_report("unable to start vhost net: %d: "
                          "falling back on userspace virtio", -r);
-- 
1.7.1
