From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>,
Parav Pandit <parav@mellanox.com>, Cindy Lu <lulu@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>,
Gautam Dawar <gdawar@xilinx.com>,
Harpreet Singh Anand <hanand@xilinx.com>,
Eli Cohen <eli@mellanox.com>,
Zhu Lingshan <lingshan.zhu@intel.com>
Subject: [RFC PATCH 9/9] vdpa: control virtqueue support on shadow virtqueue
Date: Mon, 14 Feb 2022 20:16:35 +0100 [thread overview]
Message-ID: <20220214191635.1604932-10-eperezma@redhat.com> (raw)
In-Reply-To: <20220214191635.1604932-1-eperezma@redhat.com>
Introduce the control virtqueue support for vDPA shadow virtqueue. This
is needed for advanced networking features like multiqueue.
To demonstrate command handling, VIRTIO_NET_F_CTRL_MAC_ADDR is
implemented. If vDPA device is started with SVQ support, and MAC changes
in the source VM, it will be transferred with the rest of the properties in
the emulated virtio-net device model.
A new CVQ command will be reproduced at the destination so that the NIC is
of the changed MAC.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
net/vhost-vdpa.c | 110 ++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 108 insertions(+), 2 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index fc844a7ce6..ea4e489070 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -11,6 +11,7 @@
#include "qemu/osdep.h"
#include "clients.h"
+#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
@@ -70,6 +71,28 @@ const int vdpa_feature_bits[] = {
VHOST_INVALID_FEATURE_BIT
};
+/** Supported device specific feature bits with SVQ */
+static const uint64_t vdpa_svq_device_features =
+ BIT_ULL(VIRTIO_NET_F_CSUM) |
+ BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
+ BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
+ BIT_ULL(VIRTIO_NET_F_MTU) |
+ BIT_ULL(VIRTIO_NET_F_MAC) |
+ BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
+ BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
+ BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
+ BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
+ BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
+ BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
+ BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
+ BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
+ BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
+ BIT_ULL(VIRTIO_NET_F_STATUS) |
+ BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
+ BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
+ BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
+ BIT_ULL(VIRTIO_NET_F_STANDBY);
+
VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
@@ -203,6 +226,79 @@ static void vhost_vdpa_get_iova_range(int fd,
}
}
+/*
+ * Restore device state through the control virtqueue when SVQ starts.
+ *
+ * Injects a VIRTIO_NET_CTRL_MAC_ADDR_SET command carrying the emulated
+ * device model's current MAC so the (destination) device learns the MAC
+ * configured by the guest before migration.
+ *
+ * Returns false only if the injection itself failed; returns true when
+ * the feature is not negotiated (nothing to do) or the command was queued.
+ */
+static bool vhost_vdpa_start_control_svq(VhostShadowVirtqueue *svq,
+ VirtIODevice *vdev)
+{
+ VirtIONet *n = VIRTIO_NET(vdev);
+ /* The CVQ backend is the subqueue right after the last datapath pair */
+ NetClientState *nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
+ VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc->peer);
+ uint64_t features = vdev->host_features;
+ assert(s->nc.info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+ if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ const struct virtio_net_ctrl_hdr ctrl = {
+ .class = VIRTIO_NET_CTRL_MAC,
+ .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
+ };
+ uint8_t mac[6];
+ /* Layout: [0] ctrl header (out), [1] MAC payload (out), [2] ack (in) */
+ const struct iovec data[] = {
+ {
+ .iov_base = (void *)&ctrl,
+ .iov_len = sizeof(ctrl),
+ },{
+ .iov_base = mac,
+ .iov_len = sizeof(mac),
+ },{
+ .iov_base = NULL, /* in buffer; SVQ allocates the ack space */
+ .iov_len = sizeof(virtio_net_ctrl_ack),
+ }
+ };
+ bool ret;
+
+ /* TODO: Only best effort? */
+ memcpy(mac, n->mac, sizeof(mac));
+ ret = vhost_svq_inject(svq, data, 2, 1); /* 2 out iovecs, 1 in */
+ if (!ret) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * Called by SVQ when the device marks a CVQ element as used.
+ *
+ * If the device acknowledged a MAC-class command with VIRTIO_NET_OK,
+ * replay it against the emulated virtio-net device model so qemu's copy
+ * of the state (n->mac, ...) stays in sync and survives migration.
+ */
+static void vhost_vdpa_net_handle_ctrl(VirtIODevice *vdev,
+ const VirtQueueElement *elem)
+{
+ struct virtio_net_ctrl_hdr ctrl;
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ size_t s;
+ struct iovec in = {
+ .iov_base = &status,
+ .iov_len = sizeof(status),
+ };
+
+ /* Only the class byte is read here; compare against a class constant,
+ * not a command one (was VIRTIO_NET_CTRL_MAC_ADDR_SET, which only
+ * worked because both constants happen to be 1). */
+ s = iov_to_buf(elem->out_sg, elem->out_num, 0, &ctrl, sizeof(ctrl.class));
+ if (s != sizeof(ctrl.class) ||
+ ctrl.class != VIRTIO_NET_CTRL_MAC) {
+ return;
+ }
+ /* Skip commands the real device rejected; model must not diverge */
+ s = iov_to_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
+ if (s != sizeof(status) || status != VIRTIO_NET_OK) {
+ return;
+ }
+
+ status = VIRTIO_NET_ERR;
+ /* Model writes its ack into &in (i.e. into status) */
+ virtio_net_handle_ctrl_iov(vdev, &in, 1, elem->out_sg, elem->out_num);
+ if (status != VIRTIO_NET_OK) {
+ error_report("Bad CVQ processing in model");
+ }
+}
+
+static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
+ .start = vhost_vdpa_start_control_svq,
+ .used_elem_handler = vhost_vdpa_net_handle_ctrl,
+};
+
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
const char *device,
const char *name,
@@ -232,6 +328,9 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->vhost_vdpa.index = queue_pair_index;
s->vhost_vdpa.iova_range = iova_range;
s->vhost_vdpa.shadow_vqs_enabled = svq;
+ if (!is_datapath) {
+ s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
+ }
s->vhost_vdpa.iova_tree = iova_tree;
ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
if (ret) {
@@ -322,8 +421,15 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
}
vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
if (opts->x_svq) {
- if (has_cvq) {
- error_setg(errp, "vdpa svq does not work with cvq");
+ uint64_t invalid_dev_features = features &
+ ~vdpa_svq_device_features &
+ /* Transport are all accepted at this point */
+ ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
+ VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);
+
+ if (invalid_dev_features) {
+ error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
+ invalid_dev_features);
goto err_svq;
}
iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
--
2.27.0
next prev parent reply other threads:[~2022-02-14 19:57 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-02-14 19:16 [RFC PATCH 0/9] Net Control VQ support in vDPA SVQ Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 1/9] virtio-net: Expose ctrl virtqueue logic Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 2/9] vdpa: Extract get geatures part from vhost_vdpa_get_max_queue_pairs Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 3/9] virtio: Make virtqueue_alloc_element non-static Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 4/9] vhost: Add SVQElement Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 5/9] vhost: Add custom used buffer callback Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 6/9] vdpa: Add map/unmap operation callback to SVQ Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 7/9] vhost: Add vhost_svq_inject Eugenio Pérez
2022-02-15 9:46 ` Eugenio Perez Martin
2022-02-14 19:16 ` [RFC PATCH 8/9] vhost: Add vhost_svq_start_op Eugenio Pérez
2022-02-14 19:16 ` Eugenio Pérez [this message]
2022-02-15 15:51 ` [RFC PATCH 0/9] Net Control VQ support in vDPA SVQ Eugenio Perez Martin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220214191635.1604932-10-eperezma@redhat.com \
--to=eperezma@redhat.com \
--cc=eli@mellanox.com \
--cc=gdawar@xilinx.com \
--cc=hanand@xilinx.com \
--cc=jasowang@redhat.com \
--cc=lingshan.zhu@intel.com \
--cc=lulu@redhat.com \
--cc=lvivier@redhat.com \
--cc=mst@redhat.com \
--cc=parav@mellanox.com \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).