From: Jason Wang <jasowang@redhat.com>
To: mst@redhat.com, jasowang@redhat.com, qemu-devel@nongnu.org
Cc: eperezma@redhat.com, elic@nvidia.com, gdawar@xilinx.com,
	lingshan.zhu@intel.com, lulu@redhat.com
Subject: [PATCH V2 17/21] vhost-net: control virtqueue support
Date: Fri,  3 Sep 2021 17:10:27 +0800
Message-ID: <20210903091031.47303-18-jasowang@redhat.com>
In-Reply-To: <20210903091031.47303-1-jasowang@redhat.com>

In the past we assumed there was no cvq, but this is no longer true when
we need control virtqueue support for vhost-user backends. So this patch
implements control virtqueue support for vhost-net. As with the datapath,
the control virtqueue also needs to be coupled with a NetClientState. The
vhost_net_start()/stop() functions are tweaked to accept the number of
datapath queue pairs plus the number of control virtqueues, so that we
can start and stop the vhost device accordingly.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
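Note: a minimal caller-side sketch of the new signature, assuming the
control virtqueue is backed by a vhost NetClientState placed after the
data queue pairs (as done later in this series). The feature check and
variable names below are illustrative and not part of this patch:

    /* Hypothetical usage, e.g. from virtio_net_vhost_status(). */
    int data_qps = queues;   /* number of datapath queue pairs */
    int cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ? 1 : 0;

    r = vhost_net_start(vdev, n->nic->ncs, data_qps, cvq);
    if (r < 0) {
        error_report("unable to start vhost net: %d", -r);
    }
    /* ... */
    vhost_net_stop(vdev, n->nic->ncs, data_qps, cvq);

With data_qps = 4 and cvq = 1, vhost_net_start() would bind
4 * 2 + 1 = 9 guest notifiers and start 5 vhost devices (one per data
queue pair plus one for the control virtqueue).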
 hw/net/vhost_net.c      | 43 ++++++++++++++++++++++++++++++-----------
 hw/net/virtio-net.c     |  4 ++--
 include/net/vhost_net.h |  6 ++++--
 3 files changed, 38 insertions(+), 15 deletions(-)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 386ec2eaa2..7e0b60b4d9 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -315,11 +315,14 @@ static void vhost_net_stop_one(struct vhost_net *net,
 }
 
 int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
-                    int total_queues)
+                    int data_qps, int cvq)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
     VirtioBusState *vbus = VIRTIO_BUS(qbus);
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+    int total_notifiers = data_qps * 2 + cvq;
+    VirtIONet *n = VIRTIO_NET(dev);
+    int nvhosts = data_qps + cvq;
     struct vhost_net *net;
     int r, e, i;
     NetClientState *peer;
@@ -329,9 +332,14 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
         return -ENOSYS;
     }
 
-    for (i = 0; i < total_queues; i++) {
+    for (i = 0; i < nvhosts; i++) {
+
+        if (i < data_qps) {
+            peer = qemu_get_peer(ncs, i);
+        } else { /* Control Virtqueue */
+            peer = qemu_get_peer(ncs, n->max_queues);
+        }
 
-        peer = qemu_get_peer(ncs, i);
         net = get_vhost_net(peer);
         vhost_net_set_vq_index(net, i * 2);
 
@@ -344,14 +352,18 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
         }
      }
 
-    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
+    r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
     if (r < 0) {
         error_report("Error binding guest notifier: %d", -r);
         goto err;
     }
 
-    for (i = 0; i < total_queues; i++) {
-        peer = qemu_get_peer(ncs, i);
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_qps) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_queues);
+        }
         r = vhost_net_start_one(get_vhost_net(peer), dev);
 
         if (r < 0) {
@@ -375,7 +387,7 @@ err_start:
         peer = qemu_get_peer(ncs , i);
         vhost_net_stop_one(get_vhost_net(peer), dev);
     }
-    e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
+    e = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
     if (e < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
         fflush(stderr);
@@ -385,18 +397,27 @@ err:
 }
 
 void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
-                    int total_queues)
+                    int data_qps, int cvq)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
     VirtioBusState *vbus = VIRTIO_BUS(qbus);
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+    VirtIONet *n = VIRTIO_NET(dev);
+    NetClientState *peer;
+    int total_notifiers = data_qps * 2 + cvq;
+    int nvhosts = data_qps + cvq;
     int i, r;
 
-    for (i = 0; i < total_queues; i++) {
-        vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_qps) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_queues);
+        }
+        vhost_net_stop_one(get_vhost_net(peer), dev);
     }
 
-    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
+    r = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
     if (r < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
         fflush(stderr);
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 16d20cdee5..8fccbaa44c 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -285,14 +285,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
         }
 
         n->vhost_started = 1;
-        r = vhost_net_start(vdev, n->nic->ncs, queues);
+        r = vhost_net_start(vdev, n->nic->ncs, queues, 0);
         if (r < 0) {
             error_report("unable to start vhost net: %d: "
                          "falling back on userspace virtio", -r);
             n->vhost_started = 0;
         }
     } else {
-        vhost_net_stop(vdev, n->nic->ncs, queues);
+        vhost_net_stop(vdev, n->nic->ncs, queues, 0);
         n->vhost_started = 0;
     }
 }
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index fba40cf695..e656e38af9 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -21,8 +21,10 @@ typedef struct VhostNetOptions {
 uint64_t vhost_net_get_max_queues(VHostNetState *net);
 struct vhost_net *vhost_net_init(VhostNetOptions *options);
 
-int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
-void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
+                    int data_qps, int cvq);
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
+                    int data_qps, int cvq);
 
 void vhost_net_cleanup(VHostNetState *net);
 
-- 
2.25.1



Thread overview: 29+ messages
2021-09-03  9:10 [PATCH V2 00/21] vhost-vDPA multiqueue Jason Wang
2021-09-03  9:10 ` [PATCH V2 01/21] vhost-vdpa: remove unused variable "acked_features" Jason Wang
2021-09-03  9:10 ` [PATCH V2 02/21] vhost-vdpa: correctly return err in vhost_vdpa_set_backend_cap() Jason Wang
2021-09-03  9:10 ` [PATCH V2 03/21] vhost_net: remove the meaningless assignment in vhost_net_start_one() Jason Wang
2021-09-03  9:10 ` [PATCH V2 04/21] vhost: use unsigned int for nvqs Jason Wang
2021-09-03  9:10 ` [PATCH V2 05/21] vhost_net: do not assume nvqs is always 2 Jason Wang
2021-09-03  9:10 ` [PATCH V2 06/21] vhost-vdpa: remove the unnecessary check in vhost_vdpa_add() Jason Wang
2021-09-03  9:10 ` [PATCH V2 07/21] vhost-vdpa: don't cleanup twice " Jason Wang
2021-09-03  9:10 ` [PATCH V2 08/21] vhost-vdpa: fix leaking of vhost_net " Jason Wang
2021-09-03  9:10 ` [PATCH V2 09/21] vhost-vdpa: tweak the error label " Jason Wang
2021-09-03  9:10 ` [PATCH V2 10/21] vhost-vdpa: fix the wrong assertion in vhost_vdpa_init() Jason Wang
2021-09-03  9:10 ` [PATCH V2 11/21] vhost-vdpa: remove the unncessary queue_index assignment Jason Wang
2021-09-03  9:10 ` [PATCH V2 12/21] vhost-vdpa: open device fd in net_init_vhost_vdpa() Jason Wang
2021-09-04 20:41   ` Michael S. Tsirkin
2021-09-03  9:10 ` [PATCH V2 13/21] vhost-vdpa: classify one time request Jason Wang
2021-09-03  9:10 ` [PATCH V2 14/21] vhost-vdpa: prepare for the multiqueue support Jason Wang
2021-09-03  9:10 ` [PATCH V2 15/21] vhost-vdpa: let net_vhost_vdpa_init() returns NetClientState * Jason Wang
2021-09-03  9:10 ` [PATCH V2 16/21] net: introduce control client Jason Wang
2021-09-03  9:10 ` Jason Wang [this message]
2021-09-04 20:40   ` [PATCH V2 17/21] vhost-net: control virtqueue support Michael S. Tsirkin
2021-09-06  3:43     ` Jason Wang
2021-09-03  9:10 ` [PATCH V2 18/21] virito-net: use "qps" instead of "queues" when possible Jason Wang
2021-09-04 20:42   ` Michael S. Tsirkin
2021-09-06  3:42     ` Jason Wang
2021-09-06  5:49       ` Michael S. Tsirkin
2021-09-06  6:54         ` Jason Wang
2021-09-03  9:10 ` [PATCH V2 19/21] vhost: record the last virtqueue index for the virtio device Jason Wang
2021-09-03  9:10 ` [PATCH V2 20/21] virtio-net: vhost control virtqueue support Jason Wang
2021-09-03  9:10 ` [PATCH V2 21/21] vhost-vdpa: multiqueue support Jason Wang
