From: Jason Wang <jasowang@redhat.com>
To: mst@redhat.com, jasowang@redhat.com, qemu-devel@nongnu.org
Cc: eperezma@redhat.com, elic@nvidia.com, gdawar@xilinx.com,
lingshan.zhu@intel.com, lulu@redhat.com
Subject: [PATCH V4 06/10] vhost-net: control virtqueue support
Date: Mon, 11 Oct 2021 12:28:25 +0800
Message-ID: <20211011042829.4159-7-jasowang@redhat.com>
In-Reply-To: <20211011042829.4159-1-jasowang@redhat.com>

In the past we assumed there was no cvq, but this no longer holds
once we need control virtqueue support for vhost-user backends. This
patch therefore implements control virtqueue support for vhost-net.
As with the datapath, the control virtqueue needs to be coupled with
a NetClientState. vhost_net_start/stop() are tweaked to take the
number of datapath queue pairs plus the number of control virtqueues,
so that we can start and stop the whole vhost device.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20210907090322.1756-7-jasowang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
hw/net/vhost_net-stub.c | 4 ++--
hw/net/vhost_net.c | 43 ++++++++++++++++++++++++++++++-----------
hw/net/virtio-net.c | 4 ++--
include/net/vhost_net.h | 6 ++++--
4 files changed, 40 insertions(+), 17 deletions(-)
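
Note: a minimal caller sketch for illustration only (not part of this
patch; the function name and the has_cvq flag are made up). It shows
how the new signature is meant to be used: cvq is either 0 or 1, and
vhost_net_start() derives the guest notifier count as
data_queue_pairs * 2 + cvq. virtio-net itself keeps passing cvq = 0
until a later patch in this series wires up its control virtqueue.

/*
 * Hypothetical caller (illustration only). Assumes the usual QEMU
 * includes, e.g. "net/vhost_net.h" and "qemu/error-report.h".
 */
static int example_start_vhost(VirtIODevice *vdev, NetClientState *ncs,
                               int data_queue_pairs, bool has_cvq)
{
    int cvq = has_cvq ? 1 : 0;
    int r;

    /* Start vhost for all datapath queue pairs plus the optional cvq. */
    r = vhost_net_start(vdev, ncs, data_queue_pairs, cvq);
    if (r < 0) {
        error_report("unable to start vhost net: %d", -r);
        return r;
    }

    /*
     * On device stop, vhost_net_stop() must later be called with the
     * same data_queue_pairs/cvq pair so the notifier math matches.
     */
    return 0;
}
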
diff --git a/hw/net/vhost_net-stub.c b/hw/net/vhost_net-stub.c
index a7f4252630..89d71cfb8e 100644
--- a/hw/net/vhost_net-stub.c
+++ b/hw/net/vhost_net-stub.c
@@ -33,13 +33,13 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
int vhost_net_start(VirtIODevice *dev,
NetClientState *ncs,
- int total_queues)
+ int data_queue_pairs, int cvq)
{
return -ENOSYS;
}
void vhost_net_stop(VirtIODevice *dev,
NetClientState *ncs,
- int total_queues)
+ int data_queue_pairs, int cvq)
{
}
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 386ec2eaa2..e1e9d1ec89 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -315,11 +315,14 @@ static void vhost_net_stop_one(struct vhost_net *net,
}
int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
- int total_queues)
+ int data_queue_pairs, int cvq)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+ int total_notifiers = data_queue_pairs * 2 + cvq;
+ VirtIONet *n = VIRTIO_NET(dev);
+ int nvhosts = data_queue_pairs + cvq;
struct vhost_net *net;
int r, e, i;
NetClientState *peer;
@@ -329,9 +332,14 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
return -ENOSYS;
}
- for (i = 0; i < total_queues; i++) {
+ for (i = 0; i < nvhosts; i++) {
+
+ if (i < data_queue_pairs) {
+ peer = qemu_get_peer(ncs, i);
+ } else { /* Control Virtqueue */
+ peer = qemu_get_peer(ncs, n->max_queues);
+ }
- peer = qemu_get_peer(ncs, i);
net = get_vhost_net(peer);
vhost_net_set_vq_index(net, i * 2);
@@ -344,14 +352,18 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
}
}
- r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
+ r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
if (r < 0) {
error_report("Error binding guest notifier: %d", -r);
goto err;
}
- for (i = 0; i < total_queues; i++) {
- peer = qemu_get_peer(ncs, i);
+ for (i = 0; i < nvhosts; i++) {
+ if (i < data_queue_pairs) {
+ peer = qemu_get_peer(ncs, i);
+ } else {
+ peer = qemu_get_peer(ncs, n->max_queues);
+ }
r = vhost_net_start_one(get_vhost_net(peer), dev);
if (r < 0) {
@@ -375,7 +387,7 @@ err_start:
peer = qemu_get_peer(ncs , i);
vhost_net_stop_one(get_vhost_net(peer), dev);
}
- e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
+ e = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
if (e < 0) {
fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
fflush(stderr);
@@ -385,18 +397,27 @@ err:
}
void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
- int total_queues)
+ int data_queue_pairs, int cvq)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+ VirtIONet *n = VIRTIO_NET(dev);
+ NetClientState *peer;
+ int total_notifiers = data_queue_pairs * 2 + cvq;
+ int nvhosts = data_queue_pairs + cvq;
int i, r;
- for (i = 0; i < total_queues; i++) {
- vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
+ for (i = 0; i < nvhosts; i++) {
+ if (i < data_queue_pairs) {
+ peer = qemu_get_peer(ncs, i);
+ } else {
+ peer = qemu_get_peer(ncs, n->max_queues);
+ }
+ vhost_net_stop_one(get_vhost_net(peer), dev);
}
- r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
+ r = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
if (r < 0) {
fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
fflush(stderr);
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index f205331dcf..f1119cf0ad 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -285,14 +285,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
}
n->vhost_started = 1;
- r = vhost_net_start(vdev, n->nic->ncs, queues);
+ r = vhost_net_start(vdev, n->nic->ncs, queues, 0);
if (r < 0) {
error_report("unable to start vhost net: %d: "
"falling back on userspace virtio", -r);
n->vhost_started = 0;
}
} else {
- vhost_net_stop(vdev, n->nic->ncs, queues);
+ vhost_net_stop(vdev, n->nic->ncs, queues, 0);
n->vhost_started = 0;
}
}
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index fba40cf695..387e913e4e 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -21,8 +21,10 @@ typedef struct VhostNetOptions {
uint64_t vhost_net_get_max_queues(VHostNetState *net);
struct vhost_net *vhost_net_init(VhostNetOptions *options);
-int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
-void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
+ int data_queue_pairs, int cvq);
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
+ int data_queue_pairs, int cvq);
void vhost_net_cleanup(VHostNetState *net);
--
2.25.1