From: Jason Wang <jasowang@redhat.com>
To: mst@redhat.com, jasowang@redhat.com, qemu-devel@nongnu.org
Cc: eperezma@redhat.com, elic@nvidia.com, gdawar@xilinx.com,
lingshan.zhu@intel.com, lulu@redhat.com
Subject: [PATCH V2 21/21] vhost-vdpa: multiqueue support
Date: Fri, 3 Sep 2021 17:10:31 +0800
Message-ID: <20210903091031.47303-22-jasowang@redhat.com>
In-Reply-To: <20210903091031.47303-1-jasowang@redhat.com>

This patch implements multiqueue support for vhost-vdpa. This is done
simply by reading the number of queue pairs from the config space and
initializing the datapath and control path net clients.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
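For reference, below is a minimal standalone sketch (not part of the diff)
of the config-space query that vhost_vdpa_get_max_qps() performs: it opens a
vhost-vDPA device node, checks VIRTIO_NET_F_MQ, and reads
max_virtqueue_pairs via VHOST_VDPA_GET_CONFIG. The /dev/vhost-vdpa-0 path is
only an example, error handling is kept minimal, and it builds against a
reasonably recent kernel's UAPI headers rather than QEMU's standard-headers
copies:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>       /* VHOST_GET_FEATURES, VHOST_VDPA_GET_CONFIG */
#include <linux/virtio_net.h>  /* VIRTIO_NET_F_MQ, struct virtio_net_config */

int main(void)
{
    uint64_t features;
    struct vhost_vdpa_config *cfg;
    int max_qps = 1;  /* without VIRTIO_NET_F_MQ the device is single queue pair */

    int fd = open("/dev/vhost-vdpa-0", O_RDWR);  /* example device node */
    if (fd < 0 || ioctl(fd, VHOST_GET_FEATURES, &features)) {
        perror("vhost-vdpa");
        return 1;
    }

    if (features & (1ULL << VIRTIO_NET_F_MQ)) {
        /* Read max_virtqueue_pairs (little-endian u16) from config space. */
        cfg = calloc(1, sizeof(*cfg) + sizeof(uint16_t));
        cfg->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        cfg->len = sizeof(uint16_t);
        if (ioctl(fd, VHOST_VDPA_GET_CONFIG, cfg) == 0) {
            max_qps = cfg->buf[0] | (cfg->buf[1] << 8);
        }
        free(cfg);
    }

    printf("control vq: %s, max queue pairs: %d\n",
           features & (1ULL << VIRTIO_NET_F_CTRL_VQ) ? "yes" : "no", max_qps);
    close(fd);
    return 0;
}

When VIRTIO_NET_F_MQ is not offered, the device is treated as a single queue
pair, which matches the final "return 1" fallback in the
vhost_vdpa_get_max_qps() helper added below.
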
hw/virtio/vhost-vdpa.c | 2 +-
net/vhost-vdpa.c | 104 +++++++++++++++++++++++++++++++++++++----
2 files changed, 96 insertions(+), 10 deletions(-)
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 94eb9d4069..b5df7594ff 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -578,7 +578,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
}
- if (vhost_vdpa_one_time_request(dev)) {
+ if (dev->vq_index + dev->nvqs != dev->last_index) {
return 0;
}
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 834dab28dd..63cb83d6f4 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -18,6 +18,7 @@
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qapi/error.h"
+#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
@@ -51,6 +52,14 @@ const int vdpa_feature_bits[] = {
VIRTIO_NET_F_HOST_UFO,
VIRTIO_NET_F_MRG_RXBUF,
VIRTIO_NET_F_MTU,
+ VIRTIO_NET_F_CTRL_RX,
+ VIRTIO_NET_F_CTRL_RX_EXTRA,
+ VIRTIO_NET_F_CTRL_VLAN,
+ VIRTIO_NET_F_GUEST_ANNOUNCE,
+ VIRTIO_NET_F_CTRL_MAC_ADDR,
+ VIRTIO_NET_F_RSS,
+ VIRTIO_NET_F_MQ,
+ VIRTIO_NET_F_CTRL_VQ,
VIRTIO_F_IOMMU_PLATFORM,
VIRTIO_F_RING_PACKED,
VIRTIO_NET_F_RSS,
@@ -81,7 +90,8 @@ static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
return ret;
}
-static int vhost_vdpa_add(NetClientState *ncs, void *be)
+static int vhost_vdpa_add(NetClientState *ncs, void *be, int qp_index,
+ int nvqs)
{
VhostNetOptions options;
struct vhost_net *net = NULL;
@@ -94,7 +104,7 @@ static int vhost_vdpa_add(NetClientState *ncs, void *be)
options.net_backend = ncs;
options.opaque = be;
options.busyloop_timeout = 0;
- options.nvqs = 2;
+ options.nvqs = nvqs;
net = vhost_net_init(&options);
if (!net) {
@@ -158,18 +168,28 @@ static NetClientInfo net_vhost_vdpa_info = {
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
const char *device,
const char *name,
- int vdpa_device_fd)
+ int vdpa_device_fd,
+ int qp_index,
+ int nvqs,
+ bool is_datapath)
{
NetClientState *nc = NULL;
VhostVDPAState *s;
int ret = 0;
assert(name);
- nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device, name);
+ if (is_datapath) {
+ nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
+ name);
+ } else {
+ nc = qemu_new_net_control_client(&net_vhost_vdpa_info, peer,
+ device, name);
+ }
snprintf(nc->info_str, sizeof(nc->info_str), TYPE_VHOST_VDPA);
s = DO_UPCAST(VhostVDPAState, nc, nc);
s->vhost_vdpa.device_fd = vdpa_device_fd;
- ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa);
+ s->vhost_vdpa.index = qp_index;
+ ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, qp_index, nvqs);
if (ret) {
qemu_del_net_client(nc);
return NULL;
@@ -195,12 +215,52 @@ static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp)
return 0;
}
+static int vhost_vdpa_get_max_qps(int fd, int *has_cvq, Error **errp)
+{
+ unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
+ struct vhost_vdpa_config *config;
+ __virtio16 *max_qps;
+ uint64_t features;
+ int ret;
+
+ ret = ioctl(fd, VHOST_GET_FEATURES, &features);
+ if (ret) {
+ error_setg(errp, "Fail to query features from vhost-vDPA device");
+ return ret;
+ }
+
+ if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
+ *has_cvq = 1;
+ } else {
+ *has_cvq = 0;
+ }
+
+ if (features & (1 << VIRTIO_NET_F_MQ)) {
+ config = g_malloc0(config_size + sizeof(*max_qps));
+ config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
+ config->len = sizeof(*max_qps);
+
+ ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
+ if (ret) {
+ error_setg(errp, "Fail to get config from vhost-vDPA device");
+ return -ret;
+ }
+
+ max_qps = (__virtio16 *)&config->buf;
+
+ return lduw_le_p(max_qps);
+ }
+
+ return 1;
+}
+
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
NetClientState *peer, Error **errp)
{
const NetdevVhostVDPAOptions *opts;
int vdpa_device_fd;
- NetClientState *nc;
+ NetClientState **ncs, *nc;
+ int qps, i, has_cvq = 0;
assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
opts = &netdev->u.vhost_vdpa;
@@ -215,11 +275,37 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
return -errno;
}
- nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, vdpa_device_fd);
- if (!nc) {
+ qps = vhost_vdpa_get_max_qps(vdpa_device_fd, &has_cvq, errp);
+ if (qps < 0) {
qemu_close(vdpa_device_fd);
- return -1;
+ return qps;
+ }
+
+ ncs = g_malloc0(sizeof(*ncs) * qps);
+
+ for (i = 0; i < qps; i++) {
+ ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
+ vdpa_device_fd, i, 2, true);
+ if (!ncs[i])
+ goto err;
+ }
+
+ if (has_cvq) {
+ nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
+ vdpa_device_fd, i, 1, false);
+ if (!nc)
+ goto err;
}
+ g_free(ncs);
return 0;
+
+err:
+ if (i) {
+ qemu_del_net_client(ncs[0]);
+ }
+ qemu_close(vdpa_device_fd);
+ g_free(ncs);
+
+ return -1;
}
--
2.25.1
Thread overview: 29+ messages
2021-09-03 9:10 [PATCH V2 00/21] vhost-vDPA multiqueue Jason Wang
2021-09-03 9:10 ` [PATCH V2 01/21] vhost-vdpa: remove unused variable "acked_features" Jason Wang
2021-09-03 9:10 ` [PATCH V2 02/21] vhost-vdpa: correctly return err in vhost_vdpa_set_backend_cap() Jason Wang
2021-09-03 9:10 ` [PATCH V2 03/21] vhost_net: remove the meaningless assignment in vhost_net_start_one() Jason Wang
2021-09-03 9:10 ` [PATCH V2 04/21] vhost: use unsigned int for nvqs Jason Wang
2021-09-03 9:10 ` [PATCH V2 05/21] vhost_net: do not assume nvqs is always 2 Jason Wang
2021-09-03 9:10 ` [PATCH V2 06/21] vhost-vdpa: remove the unnecessary check in vhost_vdpa_add() Jason Wang
2021-09-03 9:10 ` [PATCH V2 07/21] vhost-vdpa: don't cleanup twice " Jason Wang
2021-09-03 9:10 ` [PATCH V2 08/21] vhost-vdpa: fix leaking of vhost_net " Jason Wang
2021-09-03 9:10 ` [PATCH V2 09/21] vhost-vdpa: tweak the error label " Jason Wang
2021-09-03 9:10 ` [PATCH V2 10/21] vhost-vdpa: fix the wrong assertion in vhost_vdpa_init() Jason Wang
2021-09-03 9:10 ` [PATCH V2 11/21] vhost-vdpa: remove the unncessary queue_index assignment Jason Wang
2021-09-03 9:10 ` [PATCH V2 12/21] vhost-vdpa: open device fd in net_init_vhost_vdpa() Jason Wang
2021-09-04 20:41 ` Michael S. Tsirkin
2021-09-03 9:10 ` [PATCH V2 13/21] vhost-vdpa: classify one time request Jason Wang
2021-09-03 9:10 ` [PATCH V2 14/21] vhost-vdpa: prepare for the multiqueue support Jason Wang
2021-09-03 9:10 ` [PATCH V2 15/21] vhost-vdpa: let net_vhost_vdpa_init() returns NetClientState * Jason Wang
2021-09-03 9:10 ` [PATCH V2 16/21] net: introduce control client Jason Wang
2021-09-03 9:10 ` [PATCH V2 17/21] vhost-net: control virtqueue support Jason Wang
2021-09-04 20:40 ` Michael S. Tsirkin
2021-09-06 3:43 ` Jason Wang
2021-09-03 9:10 ` [PATCH V2 18/21] virito-net: use "qps" instead of "queues" when possible Jason Wang
2021-09-04 20:42 ` Michael S. Tsirkin
2021-09-06 3:42 ` Jason Wang
2021-09-06 5:49 ` Michael S. Tsirkin
2021-09-06 6:54 ` Jason Wang
2021-09-03 9:10 ` [PATCH V2 19/21] vhost: record the last virtqueue index for the virtio device Jason Wang
2021-09-03 9:10 ` [PATCH V2 20/21] virtio-net: vhost control virtqueue support Jason Wang
2021-09-03 9:10 ` [PATCH V2 21/21] vhost-vdpa: multiqueue support Jason Wang [this message]