From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>,
Parav Pandit <parav@mellanox.com>, Cindy Lu <lulu@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>,
Cornelia Huck <cohuck@redhat.com>,
Markus Armbruster <armbru@redhat.com>,
Gautam Dawar <gdawar@xilinx.com>,
Harpreet Singh Anand <hanand@xilinx.com>,
"Gonglei \(Arei\)" <arei.gonglei@huawei.com>,
Peter Xu <peterx@redhat.com>, Eli Cohen <eli@mellanox.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Zhu Lingshan <lingshan.zhu@intel.com>,
Eric Blake <eblake@redhat.com>,
Liuxiangdong <liuxiangdong5@huawei.com>
Subject: [RFC PATCH v5 23/23] vdpa: Add x-cvq-svq
Date: Fri, 8 Apr 2022 15:34:15 +0200
Message-ID: <20220408133415.1371760-24-eperezma@redhat.com>
In-Reply-To: <20220408133415.1371760-1-eperezma@redhat.com>
This isolates the shadow control virtqueue (CVQ) in its own virtqueue group, so it can be assigned an address space ID (ASID) separate from the data plane virtqueues.
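With CVQ in a separate group, QEMU can shadow and inspect CVQ commands
in buffers that are not reachable through the data plane mappings.

As an illustration only (the vhostdev path and id below are
placeholders, not part of this patch), the option could be enabled
with:

  -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0,x-cvq-svq=on \
  -device virtio-net-pci,netdev=vdpa0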
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
qapi/net.json | 8 +++-
net/vhost-vdpa.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++--
2 files changed, 100 insertions(+), 6 deletions(-)
diff --git a/qapi/net.json b/qapi/net.json
index 92848e4362..39c245e6cd 100644
--- a/qapi/net.json
+++ b/qapi/net.json
@@ -447,9 +447,12 @@
#
# @x-svq: Start device with (experimental) shadow virtqueue. (Since 7.1)
# (default: false)
+# @x-cvq-svq: Start device with the (experimental) shadow control virtqueue
+# (CVQ) in its own virtqueue group. (Since 7.1)
+# (default: false)
#
# Features:
-# @unstable: Member @x-svq is experimental.
+# @unstable: Members @x-svq and @x-cvq-svq are experimental.
#
# Since: 5.1
##
@@ -457,7 +460,8 @@
'data': {
'*vhostdev': 'str',
'*queues': 'int',
- '*x-svq': {'type': 'bool', 'features' : [ 'unstable'] } } }
+ '*x-svq': {'type': 'bool', 'features' : [ 'unstable'] },
+ '*x-cvq-svq': {'type': 'bool', 'features' : [ 'unstable'] } } }
##
# @NetClientDriver:
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 6207ead884..e907ef1618 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -379,6 +379,17 @@ static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
return ret;
}
+static int vhost_vdpa_get_backend_features(int fd, uint64_t *features,
+ Error **errp)
+{
+ int ret = ioctl(fd, VHOST_GET_BACKEND_FEATURES, features);
+ if (ret) {
+ error_setg_errno(errp, errno,
+ "Fail to query backend features from vhost-vDPA device");
+ }
+ return ret;
+}
+
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
int *has_cvq, Error **errp)
{
@@ -412,16 +423,56 @@ static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
return 1;
}
+/**
+ * Check that the vdpa device supports a separate ASID for the CVQ group
+ *
+ * @vdpa_device_fd: vdpa device fd
+ * @queue_pairs: Number of data queue pairs
+ * @errp: Error
+ */
+static int vhost_vdpa_check_cvq_svq(int vdpa_device_fd, int queue_pairs,
+ Error **errp)
+{
+ uint64_t backend_features;
+ unsigned num_as;
+ int r;
+
+ r = vhost_vdpa_get_backend_features(vdpa_device_fd, &backend_features,
+ errp);
+ if (unlikely(r)) {
+ return -1;
+ }
+
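+ /*
+ * VHOST_BACKEND_F_IOTLB_ASID means the backend accepts an address
+ * space id in its IOTLB messages, which is what allows CVQ
+ * mappings to live in their own address space.
+ */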
+ if (unlikely(!(backend_features & VHOST_BACKEND_F_IOTLB_ASID))) {
+ error_setg(errp, "Device without IOTLB_ASID feature");
+ return -1;
+ }
+
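+ /*
+ * Ask how many address spaces the device supports: one for the
+ * data plane virtqueues plus one for CVQ, so at least two.
+ */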
+ r = ioctl(vdpa_device_fd, VHOST_VDPA_GET_AS_NUM, &num_as);
+ if (unlikely(r)) {
+ error_setg_errno(errp, errno,
+ "Cannot retrieve number of supported ASs");
+ return -1;
+ }
+ if (unlikely(num_as < 2)) {
+ error_setg(errp, "Insufficient number of ASs (%u, min: 2)", num_as);
+ return -1;
+ }
+
+ return 0;
+}
+
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
NetClientState *peer, Error **errp)
{
const NetdevVhostVDPAOptions *opts;
+ struct vhost_vdpa_iova_range iova_range;
uint64_t features;
int vdpa_device_fd;
g_autofree NetClientState **ncs = NULL;
NetClientState *nc;
int queue_pairs, r, i, has_cvq = 0;
g_autoptr(VhostIOVATree) iova_tree = NULL;
+ ERRP_GUARD();
assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
opts = &netdev->u.vhost_vdpa;
@@ -446,8 +497,9 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
qemu_close(vdpa_device_fd);
return queue_pairs;
}
- if (opts->x_svq) {
- struct vhost_vdpa_iova_range iova_range;
+ if (opts->x_cvq_svq || opts->x_svq) {
+ vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
+
uint64_t invalid_dev_features =
features & ~vdpa_svq_device_features &
/* Transport are all accepted at this point */
@@ -459,7 +511,21 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
invalid_dev_features);
goto err_svq;
}
- vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
+ }
+
+ if (opts->x_cvq_svq) {
+ if (!has_cvq) {
+ error_setg(errp, "Cannot use x-cvq-svq with a device without cvq");
+ goto err_svq;
+ }
+
+ r = vhost_vdpa_check_cvq_svq(vdpa_device_fd, queue_pairs, errp);
+ if (unlikely(r)) {
+ error_prepend(errp, "Cannot configure CVQ SVQ: ");
+ goto err_svq;
+ }
+ }
+ if (opts->x_svq) {
iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
}
@@ -474,11 +540,35 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
}
if (has_cvq) {
+ g_autoptr(VhostIOVATree) cvq_iova_tree = NULL;
+
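+ /*
+ * With x-cvq-svq, CVQ gets a dedicated IOVA tree: its mappings
+ * live in a separate address space and must not share IOVA
+ * allocations with the data plane tree.
+ */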
+ if (opts->x_cvq_svq) {
+ cvq_iova_tree = vhost_iova_tree_new(iova_range.first,
+ iova_range.last);
+ }
+
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 1, 2 * queue_pairs + 1,
- false, opts->x_svq, iova_tree);
+ false, opts->x_cvq_svq || opts->x_svq,
+ opts->x_cvq_svq ? cvq_iova_tree : iova_tree);
if (!nc)
goto err;
+
+ if (opts->x_cvq_svq) {
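+ /*
+ * Assign ASID 1 to virtqueue group 1, assumed here to be the
+ * group that contains the CVQ.
+ */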
+ struct vhost_vring_state asid = {
+ .index = 1,
+ .num = 1,
+ };
+
+ r = ioctl(vdpa_device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
+ if (unlikely(r)) {
+ error_setg_errno(errp, errno,
+ "Cannot set cvq group independent asid");
+ goto err;
+ }
+ }
+
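+ /*
+ * net_vhost_vdpa_init() now owns the CVQ tree; clear the
+ * g_autoptr so it is not freed when leaving this scope.
+ */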
+ cvq_iova_tree = NULL;
}
iova_tree = NULL;
--
2.27.0