From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>,
Parav Pandit <parav@mellanox.com>, Cindy Lu <lulu@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>,
Gautam Dawar <gdawar@xilinx.com>,
Harpreet Singh Anand <hanand@xilinx.com>,
Eli Cohen <eli@mellanox.com>,
Zhu Lingshan <lingshan.zhu@intel.com>
Subject: [RFC PATCH 5/9] vhost: Add custom used buffer callback
Date: Mon, 14 Feb 2022 20:16:31 +0100
Message-ID: <20220214191635.1604932-6-eperezma@redhat.com>
In-Reply-To: <20220214191635.1604932-1-eperezma@redhat.com>
The callback lets SVQ users observe the VirtQueue requests and responses
that the device marks as used. QEMU can use this to keep the virtio
device model state synchronized, allowing it to be migrated with minimal
changes to the migration code.

In the case of networking, this will be used to inspect control
virtqueue messages.
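For illustration, a minimal sketch of how a consumer of the new ops
could look (the handler name, the ops variable, and the exact place
where shadow_vq_ops is assigned are assumptions, not part of this
patch):

    static void used_handler_example(VirtIODevice *vdev,
                                     const VirtQueueElement *elem)
    {
        /*
         * Inspect the element the device just marked as used,
         * e.g. a control virtqueue command, and update the
         * device model accordingly.
         */
    }

    static const VhostShadowVirtqueueOps example_svq_ops = {
        .used_elem_handler = used_handler_example,
    };

    /* At backend setup, before vhost_vdpa_init_svq() creates the SVQs: */
    v->shadow_vq_ops = &example_svq_ops;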
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
hw/virtio/vhost-shadow-virtqueue.h | 10 +++++++++-
include/hw/virtio/vhost-vdpa.h | 2 ++
hw/virtio/vhost-shadow-virtqueue.c | 15 ++++++++++++++-
hw/virtio/vhost-vdpa.c | 4 ++--
4 files changed, 27 insertions(+), 4 deletions(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index a2b0c6434d..f23fb93c20 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -16,6 +16,13 @@
typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
+typedef void (*VirtQueueElementCallback)(VirtIODevice *vdev,
+ const VirtQueueElement *elem);
+
+typedef struct VhostShadowVirtqueueOps {
+ VirtQueueElementCallback used_elem_handler;
+} VhostShadowVirtqueueOps;
+
bool vhost_svq_valid_device_features(uint64_t *features);
bool vhost_svq_valid_guest_features(uint64_t *features);
bool vhost_svq_ack_guest_features(uint64_t dev_features,
@@ -39,7 +46,8 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
VirtQueue *vq);
void vhost_svq_stop(VhostShadowVirtqueue *svq);
-VhostShadowVirtqueue *vhost_svq_new(uint16_t qsize, VhostIOVATree *iova_map);
+VhostShadowVirtqueue *vhost_svq_new(uint16_t qsize, VhostIOVATree *iova_map,
+ const VhostShadowVirtqueueOps *ops);
void vhost_svq_free(VhostShadowVirtqueue *vq);
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index cd2388b3be..a0271534e6 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -17,6 +17,7 @@
#include "hw/virtio/vhost-iova-tree.h"
#include "hw/virtio/virtio.h"
#include "standard-headers/linux/vhost_types.h"
+#include "hw/virtio/vhost-shadow-virtqueue.h"
typedef struct VhostVDPAHostNotifier {
MemoryRegion mr;
@@ -34,6 +35,7 @@ typedef struct vhost_vdpa {
/* IOVA mapping used by Shadow Virtqueue */
VhostIOVATree *iova_tree;
GPtrArray *shadow_vqs;
+ const VhostShadowVirtqueueOps *shadow_vq_ops;
struct vhost_dev *dev;
VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
} VhostVDPA;
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index ea32b7ae7f..5665947d1a 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -59,6 +59,12 @@ struct VhostShadowVirtqueue {
/* Next VirtQueue element that guest made available */
SVQElement *next_guest_avail_elem;
+ /* Optional callbacks */
+ const VhostShadowVirtqueueOps *ops;
+
+ /* Optional custom used virtqueue element handler */
+ VirtQueueElementCallback used_elem_cb;
+
/* Next head to expose to device */
uint16_t avail_idx_shadow;
@@ -509,6 +515,10 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
i = 0;
}
virtqueue_fill(vq, elem, elem->len, i++);
+
+ if (svq->ops && svq->ops->used_elem_handler) {
+ svq->ops->used_elem_handler(svq->vdev, elem);
+ }
}
virtqueue_flush(vq, i);
@@ -707,12 +717,14 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
*
* @qsize Shadow VirtQueue size
* @iova_tree Tree to perform descriptors translations
+ * @ops Optional callbacks, e.g. a handler run for each device's used buffer
*
* Returns the new virtqueue or NULL.
*
* In case of error, reason is reported through error_report.
*/
-VhostShadowVirtqueue *vhost_svq_new(uint16_t qsize, VhostIOVATree *iova_tree)
+VhostShadowVirtqueue *vhost_svq_new(uint16_t qsize, VhostIOVATree *iova_tree,
+ const VhostShadowVirtqueueOps *ops)
{
size_t desc_size = sizeof(vring_desc_t) * qsize;
size_t device_size, driver_size;
@@ -747,6 +759,7 @@ VhostShadowVirtqueue *vhost_svq_new(uint16_t qsize, VhostIOVATree *iova_tree)
svq->iova_tree = iova_tree;
svq->ring_id_maps = g_new0(SVQElement *, qsize);
event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
+ svq->ops = ops;
return g_steal_pointer(&svq);
err_init_hdev_call:
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index c64a6802b7..5707b1952d 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -17,7 +17,6 @@
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
-#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
@@ -1219,7 +1218,8 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
for (unsigned n = 0; n < hdev->nvqs; ++n) {
DMAMap device_region, driver_region;
struct vhost_vring_addr addr;
- VhostShadowVirtqueue *svq = vhost_svq_new(qsize, v->iova_tree);
+ VhostShadowVirtqueue *svq = vhost_svq_new(qsize, v->iova_tree,
+ v->shadow_vq_ops);
if (unlikely(!svq)) {
error_setg(errp, "Cannot create svq %u", n);
return -1;
--
2.27.0
Thread overview: 12+ messages
2022-02-14 19:16 [RFC PATCH 0/9] Net Control VQ support in vDPA SVQ Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 1/9] virtio-net: Expose ctrl virtqueue logic Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 2/9] vdpa: Extract get features part from vhost_vdpa_get_max_queue_pairs Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 3/9] virtio: Make virtqueue_alloc_element non-static Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 4/9] vhost: Add SVQElement Eugenio Pérez
2022-02-14 19:16 ` Eugenio Pérez [this message]
2022-02-14 19:16 ` [RFC PATCH 6/9] vdpa: Add map/unmap operation callback to SVQ Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 7/9] vhost: Add vhost_svq_inject Eugenio Pérez
2022-02-15 9:46 ` Eugenio Perez Martin
2022-02-14 19:16 ` [RFC PATCH 8/9] vhost: Add vhost_svq_start_op Eugenio Pérez
2022-02-14 19:16 ` [RFC PATCH 9/9] vdpa: control virtqueue support on shadow virtqueue Eugenio Pérez
2022-02-15 15:51 ` [RFC PATCH 0/9] Net Control VQ support in vDPA SVQ Eugenio Perez Martin