From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>,
	Parav Pandit <parav@mellanox.com>, Cindy Lu <lulu@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Jason Wang <jasowang@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	Markus Armbruster <armbru@redhat.com>,
	Gautam Dawar <gdawar@xilinx.com>,
	Harpreet Singh Anand <hanand@xilinx.com>,
	Peter Xu <peterx@redhat.com>, Eli Cohen <eli@mellanox.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Zhu Lingshan <lingshan.zhu@intel.com>,
	Eric Blake <eblake@redhat.com>,
	Liuxiangdong <liuxiangdong5@huawei.com>
Subject: [RFC PATCH v3 09/19] vhost: Add SVQElement
Date: Wed, 30 Mar 2022 20:31:06 +0200	[thread overview]
Message-ID: <20220330183116.358598-10-eperezma@redhat.com> (raw)
In-Reply-To: <20220330183116.358598-1-eperezma@redhat.com>

This allows SVQ to add metadata to the different queue elements.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
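The point of the wrapper is that VirtQueueElement stays the first member of
SVQElement, so the buffer returned by virtqueue_pop(svq->vq, sizeof(SVQElement))
is usable through either view, and the embedded element is reachable as
&svq_elem->elem. A minimal standalone sketch of this embedding pattern follows;
the stub VirtQueueElement and the main() driver are illustrative only, not QEMU
code:

#include <stdlib.h>

/* Stub standing in for QEMU's VirtQueueElement, for illustration only */
typedef struct VirtQueueElement {
    unsigned out_num;
    unsigned in_num;
} VirtQueueElement;

/* Wrapper used by the shadow virtqueue to attach per-element metadata */
typedef struct SVQElement {
    VirtQueueElement elem;   /* must remain the first member */
    /* future SVQ-specific metadata fields go here */
} SVQElement;

int main(void)
{
    /* The pop side asks for sizeof(SVQElement), so the metadata space is
     * allocated together with the element in a single allocation. */
    SVQElement *svq_elem = calloc(1, sizeof(*svq_elem));
    if (!svq_elem) {
        return 1;
    }

    VirtQueueElement *elem = &svq_elem->elem;  /* same allocation, generic view */
    elem->out_num = 1;       /* generic virtio code keeps using this view */

    free(svq_elem);          /* one allocation, freed through the wrapper */
    return 0;
}

Keeping the element as the first member means SVQ-only metadata can be added
later without touching the generic virtio element handling.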
 hw/virtio/vhost-shadow-virtqueue.h |  8 ++++--
 hw/virtio/vhost-shadow-virtqueue.c | 42 ++++++++++++++++--------------
 2 files changed, 29 insertions(+), 21 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index e5e24c536d..72aadb0aec 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -15,6 +15,10 @@
 #include "standard-headers/linux/vhost_types.h"
 #include "hw/virtio/vhost-iova-tree.h"
 
+typedef struct SVQElement {
+    VirtQueueElement elem;
+} SVQElement;
+
 /* Shadow virtqueue to relay notifications */
 typedef struct VhostShadowVirtqueue {
     /* Shadow vring */
@@ -48,10 +52,10 @@ typedef struct VhostShadowVirtqueue {
     VhostIOVATree *iova_tree;
 
     /* Map for use the guest's descriptors */
-    VirtQueueElement **ring_id_maps;
+    SVQElement **ring_id_maps;
 
     /* Next VirtQueue element that guest made available */
-    VirtQueueElement *next_guest_avail_elem;
+    SVQElement *next_guest_avail_elem;
 
     /* Next head to expose to the device */
     uint16_t shadow_avail_idx;
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 349255525f..37e80c5ee0 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -158,9 +158,10 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
     return true;
 }
 
-static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
-                                VirtQueueElement *elem, unsigned *head)
+static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, SVQElement *svq_elem,
+                                unsigned *head)
 {
+    const VirtQueueElement *elem = &svq_elem->elem;
     unsigned avail_idx;
     vring_avail_t *avail = svq->vring.avail;
     bool ok;
@@ -202,7 +203,7 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
     return true;
 }
 
-static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
+static bool vhost_svq_add(VhostShadowVirtqueue *svq, SVQElement *elem)
 {
     unsigned qemu_head;
     bool ok = vhost_svq_add_split(svq, elem, &qemu_head);
@@ -251,19 +252,21 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
         virtio_queue_set_notification(svq->vq, false);
 
         while (true) {
+            SVQElement *svq_elem;
             VirtQueueElement *elem;
             bool ok;
 
             if (svq->next_guest_avail_elem) {
-                elem = g_steal_pointer(&svq->next_guest_avail_elem);
+                svq_elem = g_steal_pointer(&svq->next_guest_avail_elem);
             } else {
-                elem = virtqueue_pop(svq->vq, sizeof(*elem));
+                svq_elem = virtqueue_pop(svq->vq, sizeof(*svq_elem));
             }
 
-            if (!elem) {
+            if (!svq_elem) {
                 break;
             }
 
+            elem = &svq_elem->elem;
             if (elem->out_num + elem->in_num > vhost_svq_available_slots(svq)) {
                 /*
                  * This condition is possible since a contiguous buffer in GPA
@@ -276,11 +279,11 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
                  * queue the current guest descriptor and ignore further kicks
                  * until some elements are used.
                  */
-                svq->next_guest_avail_elem = elem;
+                svq->next_guest_avail_elem = svq_elem;
                 return;
             }
 
-            ok = vhost_svq_add(svq, elem);
+            ok = vhost_svq_add(svq, svq_elem);
             if (unlikely(!ok)) {
                 /* VQ is broken, just return and ignore any other kicks */
                 return;
@@ -337,8 +340,7 @@ static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq)
     svq->vring.avail->flags |= cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
 }
 
-static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
-                                           uint32_t *len)
+static SVQElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, uint32_t *len)
 {
     vring_desc_t *descs = svq->vring.desc;
     const vring_used_t *used = svq->vring.used;
@@ -388,11 +390,13 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
         vhost_svq_disable_notification(svq);
         while (true) {
             uint32_t len;
-            g_autofree VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
-            if (!elem) {
+            g_autofree SVQElement *svq_elem = vhost_svq_get_buf(svq, &len);
+            VirtQueueElement *elem;
+            if (!svq_elem) {
                 break;
             }
 
+            elem = &svq_elem->elem;
             if (unlikely(i >= svq->vring.num)) {
                 qemu_log_mask(LOG_GUEST_ERROR,
                          "More than %u used buffers obtained in a %u size SVQ",
@@ -543,7 +547,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
     memset(svq->vring.desc, 0, driver_size);
     svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size);
     memset(svq->vring.used, 0, device_size);
-    svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
+    svq->ring_id_maps = g_new0(SVQElement *, svq->vring.num);
     for (unsigned i = 0; i < svq->vring.num - 1; i++) {
         svq->vring.desc[i].next = cpu_to_le16(i + 1);
     }
@@ -556,7 +560,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
 void vhost_svq_stop(VhostShadowVirtqueue *svq)
 {
     event_notifier_set_handler(&svq->svq_kick, NULL);
-    g_autofree VirtQueueElement *next_avail_elem = NULL;
+    g_autofree SVQElement *next_avail_elem = NULL;
 
     if (!svq->vq) {
         return;
@@ -566,16 +570,16 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
     vhost_svq_flush(svq, false);
 
     for (unsigned i = 0; i < svq->vring.num; ++i) {
-        g_autofree VirtQueueElement *elem = NULL;
-        elem = g_steal_pointer(&svq->ring_id_maps[i]);
-        if (elem) {
-            virtqueue_detach_element(svq->vq, elem, 0);
+        g_autofree SVQElement *svq_elem = NULL;
+        svq_elem = g_steal_pointer(&svq->ring_id_maps[i]);
+        if (svq_elem) {
+            virtqueue_detach_element(svq->vq, &svq_elem->elem, 0);
         }
     }
 
     next_avail_elem = g_steal_pointer(&svq->next_guest_avail_elem);
     if (next_avail_elem) {
-        virtqueue_detach_element(svq->vq, next_avail_elem, 0);
+        virtqueue_detach_element(svq->vq, &next_avail_elem->elem, 0);
     }
     svq->vq = NULL;
     g_free(svq->ring_id_maps);
-- 
2.27.0


