From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>,
	Parav Pandit <parav@mellanox.com>, Cindy Lu <lulu@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Jason Wang <jasowang@redhat.com>,
	Gautam Dawar <gdawar@xilinx.com>,
	Harpreet Singh Anand <hanand@xilinx.com>,
	"Gonglei \(Arei\)" <arei.gonglei@huawei.com>,
	Eli Cohen <eli@mellanox.com>,
	Liuxiangdong <liuxiangdong5@huawei.com>,
	Zhu Lingshan <lingshan.zhu@intel.com>
Subject: [RFC PATCH v7 14/25] vhost: Add SVQElement
Date: Wed, 13 Apr 2022 18:31:55 +0200
Message-ID: <20220413163206.1958254-15-eperezma@redhat.com>
In-Reply-To: <20220413163206.1958254-1-eperezma@redhat.com>

This allows SVQ to attach its own metadata to each queue element: the
guest's VirtQueueElement is now embedded as the first member of a new
SVQElement wrapper, so later patches can extend SVQElement with extra
per-element state without touching the generic virtio code.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 hw/virtio/vhost-shadow-virtqueue.h |  8 ++++--
 hw/virtio/vhost-shadow-virtqueue.c | 46 ++++++++++++++++--------------
 2 files changed, 31 insertions(+), 23 deletions(-)
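
The pattern this patch introduces, in one standalone sketch. The types
below are stand-ins, not QEMU's real ones, and the ndescs field is only
a hypothetical example of the per-element metadata a later patch could
add:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for QEMU's VirtQueueElement. */
typedef struct VirtQueueElement {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
} VirtQueueElement;

/*
 * The new wrapper.  elem must stay the first member so that a pointer
 * to SVQElement is also a valid pointer to its embedded elem.
 */
typedef struct SVQElement {
    VirtQueueElement elem;
    unsigned int ndescs;  /* hypothetical metadata */
} SVQElement;

int main(void)
{
    /*
     * Allocating sizeof(SVQElement) instead of sizeof(VirtQueueElement)
     * is all the generic pop path needs to make room for the metadata.
     */
    SVQElement *svq_elem = calloc(1, sizeof(*svq_elem));
    VirtQueueElement *elem = &svq_elem->elem;

    /* Because elem is the first member, both pointers coincide. */
    assert(offsetof(SVQElement, elem) == 0);
    assert((void *)svq_elem == (void *)elem);

    elem->index = 3;
    svq_elem->ndescs = 2;
    printf("index=%u ndescs=%u\n", elem->index, svq_elem->ndescs);

    free(svq_elem);  /* one allocation, one free */
    return 0;
}

The same invariant is what lets vhost_svq_stop() below pass
&svq_elem->elem to virtqueue_detach_element() while freeing svq_elem
itself.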

diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index c132c994e9..f35d4b8f90 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -15,6 +15,10 @@
 #include "standard-headers/linux/vhost_types.h"
 #include "hw/virtio/vhost-iova-tree.h"
 
+typedef struct SVQElement {
+    VirtQueueElement elem;
+} SVQElement;
+
 /* Shadow virtqueue to relay notifications */
 typedef struct VhostShadowVirtqueue {
     /* Shadow vring */
@@ -48,10 +52,10 @@ typedef struct VhostShadowVirtqueue {
     VhostIOVATree *iova_tree;
 
     /* Map for use the guest's descriptors */
-    VirtQueueElement **ring_id_maps;
+    SVQElement **ring_id_maps;
 
     /* Next VirtQueue element that guest made available */
-    VirtQueueElement *next_guest_avail_elem;
+    SVQElement *next_guest_avail_elem;
 
     /*
      * Backup next field for each descriptor so we can recover securely, not
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index f874374651..1702365475 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -159,9 +159,10 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
     return true;
 }
 
-static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
-                                VirtQueueElement *elem, unsigned *head)
+static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, SVQElement *svq_elem,
+                                unsigned *head)
 {
+    const VirtQueueElement *elem = &svq_elem->elem;
     unsigned avail_idx;
     vring_avail_t *avail = svq->vring.avail;
     bool ok;
@@ -203,7 +204,7 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
     return true;
 }
 
-static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
+static bool vhost_svq_add(VhostShadowVirtqueue *svq, SVQElement *elem)
 {
     unsigned qemu_head;
     bool ok = vhost_svq_add_split(svq, elem, &qemu_head);
@@ -252,19 +253,21 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
         virtio_queue_set_notification(svq->vq, false);
 
         while (true) {
+            SVQElement *svq_elem;
             VirtQueueElement *elem;
             bool ok;
 
             if (svq->next_guest_avail_elem) {
-                elem = g_steal_pointer(&svq->next_guest_avail_elem);
+                svq_elem = g_steal_pointer(&svq->next_guest_avail_elem);
             } else {
-                elem = virtqueue_pop(svq->vq, sizeof(*elem));
+                svq_elem = virtqueue_pop(svq->vq, sizeof(*svq_elem));
             }
 
-            if (!elem) {
+            if (!svq_elem) {
                 break;
             }
 
+            elem = &svq_elem->elem;
             if (elem->out_num + elem->in_num > vhost_svq_available_slots(svq)) {
                 /*
                  * This condition is possible since a contiguous buffer in GPA
@@ -277,11 +280,11 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
                  * queue the current guest descriptor and ignore further kicks
                  * until some elements are used.
                  */
-                svq->next_guest_avail_elem = elem;
+                svq->next_guest_avail_elem = svq_elem;
                 return;
             }
 
-            ok = vhost_svq_add(svq, elem);
+            ok = vhost_svq_add(svq, svq_elem);
             if (unlikely(!ok)) {
                 /* VQ is broken, just return and ignore any other kicks */
                 return;
@@ -348,8 +351,7 @@ static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
     return i;
 }
 
-static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
-                                           uint32_t *len)
+static SVQElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, uint32_t *len)
 {
     const vring_used_t *used = svq->vring.used;
     vring_used_elem_t used_elem;
@@ -379,8 +381,8 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
         return NULL;
     }
 
-    num = svq->ring_id_maps[used_elem.id]->in_num +
-          svq->ring_id_maps[used_elem.id]->out_num;
+    num = svq->ring_id_maps[used_elem.id]->elem.in_num +
+          svq->ring_id_maps[used_elem.id]->elem.out_num;
     last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
     svq->desc_next[last_used_chain] = svq->free_head;
     svq->free_head = used_elem.id;
@@ -401,11 +403,13 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
         vhost_svq_disable_notification(svq);
         while (true) {
             uint32_t len;
-            g_autofree VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
-            if (!elem) {
+            g_autofree SVQElement *svq_elem = vhost_svq_get_buf(svq, &len);
+            VirtQueueElement *elem;
+            if (!svq_elem) {
                 break;
             }
 
+            elem = &svq_elem->elem;
             if (unlikely(i >= svq->vring.num)) {
                 qemu_log_mask(LOG_GUEST_ERROR,
                          "More than %u used buffers obtained in a %u size SVQ",
@@ -556,7 +560,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
     memset(svq->vring.desc, 0, driver_size);
     svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size);
     memset(svq->vring.used, 0, device_size);
-    svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
+    svq->ring_id_maps = g_new0(SVQElement *, svq->vring.num);
     svq->desc_next = g_new0(uint16_t, svq->vring.num);
     for (unsigned i = 0; i < svq->vring.num - 1; i++) {
         svq->desc_next[i] = cpu_to_le16(i + 1);
@@ -570,7 +574,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
 void vhost_svq_stop(VhostShadowVirtqueue *svq)
 {
     event_notifier_set_handler(&svq->svq_kick, NULL);
-    g_autofree VirtQueueElement *next_avail_elem = NULL;
+    g_autofree SVQElement *next_avail_elem = NULL;
 
     if (!svq->vq) {
         return;
@@ -580,16 +584,16 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
     vhost_svq_flush(svq, false);
 
     for (unsigned i = 0; i < svq->vring.num; ++i) {
-        g_autofree VirtQueueElement *elem = NULL;
-        elem = g_steal_pointer(&svq->ring_id_maps[i]);
-        if (elem) {
-            virtqueue_detach_element(svq->vq, elem, 0);
+        g_autofree SVQElement *svq_elem = NULL;
+        svq_elem = g_steal_pointer(&svq->ring_id_maps[i]);
+        if (svq_elem) {
+            virtqueue_detach_element(svq->vq, &svq_elem->elem, 0);
         }
     }
 
     next_avail_elem = g_steal_pointer(&svq->next_guest_avail_elem);
     if (next_avail_elem) {
-        virtqueue_detach_element(svq->vq, next_avail_elem, 0);
+        virtqueue_detach_element(svq->vq, &next_avail_elem->elem, 0);
     }
     svq->vq = NULL;
     g_free(svq->desc_next);
-- 
2.27.0
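
A note on the virtqueue_pop() call above: virtqueue_pop(vq, sz)
allocates sz bytes for the element and lays the per-descriptor arrays
out after those sz bytes, which is why passing sizeof(*svq_elem)
instead of sizeof(*elem) is enough to reserve room for the wrapper's
metadata in the same block. Below is a simplified model of that layout;
it is not QEMU's real virtqueue_alloc_element (which also handles
iovecs and alignment), and the ndescs field is again hypothetical:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins; the real structs carry iovecs as well. */
typedef struct VirtQueueElement {
    unsigned int out_num;
    unsigned int in_num;
    uint64_t *in_addr;   /* points into the same allocation */
    uint64_t *out_addr;
} VirtQueueElement;

typedef struct SVQElement {
    VirtQueueElement elem;  /* must be first */
    unsigned int ndescs;    /* hypothetical metadata */
} SVQElement;

/*
 * Models the shape of virtqueue_alloc_element(sz, out_num, in_num):
 * one block of sz bytes followed by the per-descriptor arrays.
 * Because the arrays are laid out after the full sz, not after
 * sizeof(VirtQueueElement), a caller may pass the size of any struct
 * that embeds VirtQueueElement as its first member.
 */
static void *alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    size_t in_ofs = sz;  /* arrays start after the caller's struct */
    size_t out_ofs = in_ofs + in_num * sizeof(uint64_t);
    size_t total = out_ofs + out_num * sizeof(uint64_t);
    VirtQueueElement *elem = calloc(1, total);

    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (uint64_t *)((char *)elem + in_ofs);
    elem->out_addr = (uint64_t *)((char *)elem + out_ofs);
    return elem;
}

int main(void)
{
    /* Passing sizeof(SVQElement) reserves room for the metadata too. */
    SVQElement *svq_elem = alloc_element(sizeof(*svq_elem), 1, 1);

    svq_elem->ndescs = 2;
    svq_elem->elem.in_addr[0] = 0x1000;
    printf("ndescs=%u in_addr[0]=%" PRIx64 "\n",
           svq_elem->ndescs, svq_elem->elem.in_addr[0]);
    free(svq_elem);  /* wrapper, element and arrays go together */
    return 0;
}

This is also why a single g_free()/g_autofree on the SVQElement pointer
releases everything at once, and why the patch needs no separate
teardown for the embedded elem.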


