From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>,
	Parav Pandit <parav@mellanox.com>, Cindy Lu <lulu@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Jason Wang <jasowang@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	Markus Armbruster <armbru@redhat.com>,
	Gautam Dawar <gdawar@xilinx.com>,
	Harpreet Singh Anand <hanand@xilinx.com>,
	Peter Xu <peterx@redhat.com>, Eli Cohen <eli@mellanox.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Zhu Lingshan <lingshan.zhu@intel.com>,
	Eric Blake <eblake@redhat.com>,
	Liuxiangdong <liuxiangdong5@huawei.com>
Subject: [RFC PATCH v3 10/19] vhost: Add custom used buffer callback
Date: Wed, 30 Mar 2022 20:31:07 +0200
Message-ID: <20220330183116.358598-11-eperezma@redhat.com>
In-Reply-To: <20220330183116.358598-1-eperezma@redhat.com>

The callback allows SVQ users to inspect VirtQueue requests and
responses. QEMU can use this to keep the virtio device model state in
sync, allowing it to be migrated with minimal changes to the migration
code.

In the case of networking, this will be used to inspect control
virtqueue messages.
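
For illustration only, a control virtqueue consumer could register the
new hook roughly as in the sketch below. This is not part of this
patch: the function names and the registration helper are placeholders
assumed for the example.

    /* Hypothetical consumer-side sketch; names are illustrative. */
    #include "hw/virtio/vhost-shadow-virtqueue.h"
    #include "hw/virtio/vhost-vdpa.h"

    static void vhost_vdpa_net_handle_ctrl_used(VirtIODevice *vdev,
                                                const VirtQueueElement *elem)
    {
        /*
         * Inspect the CVQ command the guest made available, e.g. to
         * mirror MAC or MQ changes into the virtio-net device model.
         */
    }

    static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
        .used_elem_handler = vhost_vdpa_net_handle_ctrl_used,
    };

    static void vhost_vdpa_net_register_ops(struct vhost_vdpa *v)
    {
        /*
         * Every SVQ created from this vhost_vdpa gets these ops, and
         * vhost_svq_flush() calls the hook right after virtqueue_fill()
         * for each used element.
         */
        v->shadow_vq_ops = &vhost_vdpa_net_svq_ops;
    }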

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 hw/virtio/vhost-shadow-virtqueue.h | 16 +++++++++++++++-
 include/hw/virtio/vhost-vdpa.h     |  2 ++
 hw/virtio/vhost-shadow-virtqueue.c |  9 ++++++++-
 hw/virtio/vhost-vdpa.c             |  3 ++-
 4 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 72aadb0aec..4ff6a0cda0 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -19,6 +19,13 @@ typedef struct SVQElement {
     VirtQueueElement elem;
 } SVQElement;
 
+typedef void (*VirtQueueElementCallback)(VirtIODevice *vdev,
+                                         const VirtQueueElement *elem);
+
+typedef struct VhostShadowVirtqueueOps {
+    VirtQueueElementCallback used_elem_handler;
+} VhostShadowVirtqueueOps;
+
 /* Shadow virtqueue to relay notifications */
 typedef struct VhostShadowVirtqueue {
     /* Shadow vring */
@@ -57,6 +64,12 @@ typedef struct VhostShadowVirtqueue {
     /* Next VirtQueue element that guest made available */
     SVQElement *next_guest_avail_elem;
 
+    /* Optional callbacks */
+    const VhostShadowVirtqueueOps *ops;
+
+    /* Optional custom used virtqueue element handler */
+    VirtQueueElementCallback used_elem_cb;
+
     /* Next head to expose to the device */
     uint16_t shadow_avail_idx;
 
@@ -83,7 +96,8 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
                      VirtQueue *vq);
 void vhost_svq_stop(VhostShadowVirtqueue *svq);
 
-VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree);
+VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
+                                    const VhostShadowVirtqueueOps *ops);
 
 void vhost_svq_free(gpointer vq);
 G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostShadowVirtqueue, vhost_svq_free);
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index a29dbb3f53..f1ba46a860 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -17,6 +17,7 @@
 #include "hw/virtio/vhost-iova-tree.h"
 #include "hw/virtio/virtio.h"
 #include "standard-headers/linux/vhost_types.h"
+#include "hw/virtio/vhost-shadow-virtqueue.h"
 
 typedef struct VhostVDPAHostNotifier {
     MemoryRegion mr;
@@ -35,6 +36,7 @@ typedef struct vhost_vdpa {
     /* IOVA mapping used by the Shadow Virtqueue */
     VhostIOVATree *iova_tree;
     GPtrArray *shadow_vqs;
+    const VhostShadowVirtqueueOps *shadow_vq_ops;
     struct vhost_dev *dev;
     VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
 } VhostVDPA;
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 37e80c5ee0..112d0daf20 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -406,6 +406,10 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
                 return;
             }
             virtqueue_fill(vq, elem, len, i++);
+
+            if (svq->ops && svq->ops->used_elem_handler) {
+                svq->ops->used_elem_handler(svq->vdev, elem);
+            }
         }
 
         virtqueue_flush(vq, i);
@@ -592,12 +596,14 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
  * shadow methods and file descriptors.
  *
  * @iova_tree: Tree to perform descriptors translations
+ * @ops: SVQ operations hooks
  *
  * Returns the new virtqueue or NULL.
  *
  * In case of error, reason is reported through error_report.
  */
-VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree)
+VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
+                                    const VhostShadowVirtqueueOps *ops)
 {
     g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
     int r;
@@ -619,6 +625,7 @@ VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree)
     event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
     event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
     svq->iova_tree = iova_tree;
+    svq->ops = ops;
     return g_steal_pointer(&svq);
 
 err_init_hdev_call:
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 9eeac8fa8e..ebd17b6185 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -409,7 +409,8 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
 
     shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
     for (unsigned n = 0; n < hdev->nvqs; ++n) {
-        g_autoptr(VhostShadowVirtqueue) svq = vhost_svq_new(v->iova_tree);
+        g_autoptr(VhostShadowVirtqueue) svq = vhost_svq_new(v->iova_tree,
+                                                            v->shadow_vq_ops);
 
         if (unlikely(!svq)) {
             error_setg(errp, "Cannot create svq %u", n);
-- 
2.27.0


