qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Eric Blake <eblake@redhat.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Liuxiangdong <liuxiangdong5@huawei.com>,
	Cindy Lu <lulu@redhat.com>, Zhu Lingshan <lingshan.zhu@intel.com>,
	"Gonglei (Arei)" <arei.gonglei@huawei.com>,
	Laurent Vivier <lvivier@redhat.com>,
	Gautam Dawar <gdawar@xilinx.com>,
	Stefano Garzarella <sgarzare@redhat.com>,
	Markus Armbruster <armbru@redhat.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Harpreet Singh Anand <hanand@xilinx.com>,
	Jason Wang <jasowang@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Eli Cohen <eli@mellanox.com>, Parav Pandit <parav@mellanox.com>,
	Cornelia Huck <cohuck@redhat.com>
Subject: [PATCH 15/22] vhost: Add svq avail_handler callback
Date: Fri,  8 Jul 2022 12:50:06 +0200	[thread overview]
Message-ID: <20220708105013.1899854-16-eperezma@redhat.com> (raw)
In-Reply-To: <20220708105013.1899854-1-eperezma@redhat.com>

This allows external handlers to be aware of new buffers that the guest
places in the virtqueue.

When this callback is defined, ownership of the guest's virtqueue element
is transferred to the callback. This means that if the user wants to
forward the descriptor it needs to manually inject it. The callback is
also free to process the command by itself and use the element with
svq_push.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 hw/virtio/vhost-shadow-virtqueue.h | 23 ++++++++++++++++++++++-
 hw/virtio/vhost-shadow-virtqueue.c | 13 +++++++++++--
 hw/virtio/vhost-vdpa.c             |  2 +-
 3 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 96ce7aa62e..cfc891e2e8 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -24,11 +24,28 @@ typedef struct SVQElement {
 } SVQElement;
 
 typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
+
+/**
+ * Callback to handle an avail buffer.
+ *
+ * @svq:  Shadow virtqueue
+ * @elem:  Element placed in the queue by the guest
+ * @vq_callback_opaque:  Opaque
+ *
+ * Returns true if the vq is running as expected, false otherwise.
+ *
+ * Note that ownership of elem is transferred to the callback.
+ */
+typedef bool (*VirtQueueAvailCallback)(VhostShadowVirtqueue *svq,
+                                       VirtQueueElement *elem,
+                                       void *vq_callback_opaque);
+
 typedef void (*VirtQueueUsedCallback)(VhostShadowVirtqueue *svq,
                                       void *used_elem_opaque,
                                       uint32_t written);
 
 typedef struct VhostShadowVirtqueueOps {
+    VirtQueueAvailCallback avail_handler;
     VirtQueueUsedCallback used_handler;
 } VhostShadowVirtqueueOps;
 
@@ -79,6 +96,9 @@ typedef struct VhostShadowVirtqueue {
     /* Caller callbacks */
     const VhostShadowVirtqueueOps *ops;
 
+    /* Caller callbacks opaque */
+    void *ops_opaque;
+
     /* Next head to expose to the device */
     uint16_t shadow_avail_idx;
 
@@ -111,7 +131,8 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
 void vhost_svq_stop(VhostShadowVirtqueue *svq);
 
 VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
-                                    const VhostShadowVirtqueueOps *ops);
+                                    const VhostShadowVirtqueueOps *ops,
+                                    void *ops_opaque);
 
 void vhost_svq_free(gpointer vq);
 G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostShadowVirtqueue, vhost_svq_free);
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 40183f8afd..78579b9e0b 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -374,7 +374,13 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
                 return;
             }
 
-            ok = vhost_svq_add_element(svq, g_steal_pointer(&elem));
+            if (svq->ops) {
+                ok = svq->ops->avail_handler(svq, g_steal_pointer(&elem),
+                                             svq->ops_opaque);
+            } else {
+                ok = vhost_svq_add_element(svq, g_steal_pointer(&elem));
+            }
+
             if (unlikely(!ok)) {
                 /* VQ is broken, just return and ignore any other kicks */
                 return;
@@ -766,13 +772,15 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
  *
  * @iova_tree: Tree to perform descriptors translations
  * @ops: SVQ owner callbacks
+ * @ops_opaque: ops opaque pointer
  *
  * Returns the new virtqueue or NULL.
  *
  * In case of error, reason is reported through error_report.
  */
 VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
-                                    const VhostShadowVirtqueueOps *ops)
+                                    const VhostShadowVirtqueueOps *ops,
+                                    void *ops_opaque)
 {
     g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
     int r;
@@ -795,6 +803,7 @@ VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
     event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
     svq->iova_tree = iova_tree;
     svq->ops = ops;
+    svq->ops_opaque = ops_opaque;
     return g_steal_pointer(&svq);
 
 err_init_hdev_call:
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 25f7146fe4..9a4f00c114 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -420,7 +420,7 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
     for (unsigned n = 0; n < hdev->nvqs; ++n) {
         g_autoptr(VhostShadowVirtqueue) svq;
 
-        svq = vhost_svq_new(v->iova_tree, NULL);
+        svq = vhost_svq_new(v->iova_tree, NULL, NULL);
         if (unlikely(!svq)) {
             error_setg(errp, "Cannot create svq %u", n);
             return -1;
-- 
2.31.1



  parent reply	other threads:[~2022-07-08 11:07 UTC|newest]

Thread overview: 25+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-07-08 10:49 [PATCH 00/22] vdpa net devices Rx filter change notification with Shadow VQ Eugenio Pérez
2022-07-08 10:49 ` [PATCH 01/22] vhost: Return earlier if used buffers overrun Eugenio Pérez
2022-07-08 10:49 ` [PATCH 02/22] vhost: move descriptor translation to vhost_svq_vring_write_descs Eugenio Pérez
2022-07-08 10:49 ` [PATCH 03/22] vdpa: Clean vhost_vdpa_dev_start(dev, false) Eugenio Pérez
2022-07-08 10:49 ` [PATCH 04/22] virtio-net: Expose ctrl virtqueue logic Eugenio Pérez
2022-07-08 10:49 ` [PATCH 05/22] vhost: Decouple vhost_svq_add_split from VirtQueueElement Eugenio Pérez
2022-07-08 10:49 ` [PATCH 06/22] vhost: Reorder vhost_svq_last_desc_of_chain Eugenio Pérez
2022-07-08 10:49 ` [PATCH 07/22] vhost: Add SVQElement Eugenio Pérez
2022-07-08 10:49 ` [PATCH 08/22] vhost: Move last chain id to SVQ element Eugenio Pérez
2022-07-08 10:50 ` [PATCH 09/22] vhost: Add opaque member to SVQElement Eugenio Pérez
2022-07-08 10:50 ` [PATCH 10/22] vdpa: Small rename of error labels Eugenio Pérez
2022-07-08 10:50 ` [PATCH 11/22] vhost: add vhost_svq_push_elem Eugenio Pérez
2022-07-08 10:50 ` [PATCH 12/22] vhost: Add vhost_svq_inject Eugenio Pérez
2022-07-08 10:50 ` [PATCH 13/22] vhost: add vhost_svq_poll Eugenio Pérez
2022-07-08 10:50 ` [PATCH 14/22] vhost: Add custom used buffer callback Eugenio Pérez
2022-07-08 10:50 ` Eugenio Pérez [this message]
2022-07-08 10:50 ` [PATCH 16/22] vhost: add detach SVQ operation Eugenio Pérez
2022-07-08 10:50 ` [PATCH 17/22] vdpa: Export vhost_vdpa_dma_map and unmap calls Eugenio Pérez
2022-07-08 10:50 ` [PATCH 18/22] vdpa: manual forward CVQ buffers Eugenio Pérez
2022-07-08 10:50 ` [PATCH 19/22] vdpa: Buffer CVQ support on shadow virtqueue Eugenio Pérez
2022-07-08 10:50 ` [PATCH 20/22] vdpa: Extract get features part from vhost_vdpa_get_max_queue_pairs Eugenio Pérez
2022-07-08 10:50 ` [PATCH 21/22] vdpa: Add device migration blocker Eugenio Pérez
2022-07-08 10:50 ` [PATCH 22/22] vdpa: Add x-svq to NetdevVhostVDPAOptions Eugenio Pérez
2022-07-08 12:52   ` Markus Armbruster
2022-07-11  7:17     ` Eugenio Perez Martin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220708105013.1899854-16-eperezma@redhat.com \
    --to=eperezma@redhat.com \
    --cc=arei.gonglei@huawei.com \
    --cc=armbru@redhat.com \
    --cc=cohuck@redhat.com \
    --cc=eblake@redhat.com \
    --cc=eli@mellanox.com \
    --cc=gdawar@xilinx.com \
    --cc=hanand@xilinx.com \
    --cc=jasowang@redhat.com \
    --cc=lingshan.zhu@intel.com \
    --cc=liuxiangdong5@huawei.com \
    --cc=lulu@redhat.com \
    --cc=lvivier@redhat.com \
    --cc=mst@redhat.com \
    --cc=parav@mellanox.com \
    --cc=pbonzini@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=sgarzare@redhat.com \
    --cc=stefanha@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).