From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Eric Blake <eblake@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Liuxiangdong <liuxiangdong5@huawei.com>,
Cindy Lu <lulu@redhat.com>, Zhu Lingshan <lingshan.zhu@intel.com>,
"Gonglei (Arei)" <arei.gonglei@huawei.com>,
Laurent Vivier <lvivier@redhat.com>,
Gautam Dawar <gdawar@xilinx.com>,
Stefano Garzarella <sgarzare@redhat.com>,
Markus Armbruster <armbru@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Harpreet Singh Anand <hanand@xilinx.com>,
Jason Wang <jasowang@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Eli Cohen <eli@mellanox.com>, Parav Pandit <parav@mellanox.com>,
Cornelia Huck <cohuck@redhat.com>
Subject: [PATCH 07/22] vhost: Add SVQElement
Date: Fri, 8 Jul 2022 12:49:58 +0200
Message-ID: <20220708105013.1899854-8-eperezma@redhat.com>
In-Reply-To: <20220708105013.1899854-1-eperezma@redhat.com>
This will allow SVQ to add metadata to the different queue elements. To
keep the changes simple, this patch only stores the actual element;
later patches in the series add more fields.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
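For context, a minimal sketch (not part of this patch) of where the
wrapper is headed, going only by the titles of patches 08/22 and 09/22
later in this series; every field name below except elem is an
illustrative placeholder, not necessarily the final one:

/* Sketch only: VirtQueueElement comes from "hw/virtio/virtio.h" */
typedef struct SVQElement {
    /* Guest's element, the only member stored by this patch */
    VirtQueueElement *elem;

    /*
     * Possible later additions (names are guesses):
     * - patch 08/22: last descriptor chain id of the element
     * - patch 09/22: opaque per-element context
     */
    uint16_t last_chain_id;
    void *opaque;
} SVQElement;

Storing SVQElement by value in ring_id_maps, instead of keeping an
array of VirtQueueElement pointers, lets a NULL elem double as the
"empty slot" sentinel that vhost_svq_is_empty_elem() checks below.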
hw/virtio/vhost-shadow-virtqueue.h | 8 ++++--
hw/virtio/vhost-shadow-virtqueue.c | 41 ++++++++++++++++++++----------
2 files changed, 33 insertions(+), 16 deletions(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index c132c994e9..0b34f48037 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -15,6 +15,10 @@
#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/vhost-iova-tree.h"
+typedef struct SVQElement {
+ VirtQueueElement *elem;
+} SVQElement;
+
/* Shadow virtqueue to relay notifications */
typedef struct VhostShadowVirtqueue {
/* Shadow vring */
@@ -47,8 +51,8 @@ typedef struct VhostShadowVirtqueue {
/* IOVA mapping */
VhostIOVATree *iova_tree;
- /* Map for use the guest's descriptors */
- VirtQueueElement **ring_id_maps;
+ /* Each element context */
+ SVQElement *ring_id_maps;
/* Next VirtQueue element that guest made available */
VirtQueueElement *next_guest_avail_elem;
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index a4d5d7bae0..d50e1383f5 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -246,7 +246,7 @@ static bool vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
return false;
}
- svq->ring_id_maps[qemu_head] = elem;
+ svq->ring_id_maps[qemu_head].elem = elem;
return true;
}
@@ -384,15 +384,25 @@ static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq)
svq->vring.avail->flags |= cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
}
-static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
- uint32_t *len)
+static bool vhost_svq_is_empty_elem(SVQElement elem)
+{
+ return elem.elem == NULL;
+}
+
+static SVQElement vhost_svq_empty_elem(void)
+{
+ return (SVQElement){};
+}
+
+static SVQElement vhost_svq_get_buf(VhostShadowVirtqueue *svq, uint32_t *len)
{
const vring_used_t *used = svq->vring.used;
vring_used_elem_t used_elem;
+ SVQElement svq_elem = vhost_svq_empty_elem();
uint16_t last_used, last_used_chain, num;
if (!vhost_svq_more_used(svq)) {
- return NULL;
+ return svq_elem;
}
/* Only get used array entries after they have been exposed by dev */
@@ -405,24 +415,25 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
if (unlikely(used_elem.id >= svq->vring.num)) {
qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used",
svq->vdev->name, used_elem.id);
- return NULL;
+ return svq_elem;
}
- if (unlikely(!svq->ring_id_maps[used_elem.id])) {
+ svq_elem = svq->ring_id_maps[used_elem.id];
+ svq->ring_id_maps[used_elem.id] = vhost_svq_empty_elem();
+ if (unlikely(vhost_svq_is_empty_elem(svq_elem))) {
qemu_log_mask(LOG_GUEST_ERROR,
"Device %s says index %u is used, but it was not available",
svq->vdev->name, used_elem.id);
- return NULL;
+ return svq_elem;
}
- num = svq->ring_id_maps[used_elem.id]->in_num +
- svq->ring_id_maps[used_elem.id]->out_num;
+ num = svq_elem.elem->in_num + svq_elem.elem->out_num;
last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
svq->desc_next[last_used_chain] = svq->free_head;
svq->free_head = used_elem.id;
*len = used_elem.len;
- return g_steal_pointer(&svq->ring_id_maps[used_elem.id]);
+ return svq_elem;
}
static void vhost_svq_flush(VhostShadowVirtqueue *svq,
@@ -437,6 +448,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
vhost_svq_disable_notification(svq);
while (true) {
uint32_t len;
+ SVQElement svq_elem;
g_autofree VirtQueueElement *elem = NULL;
if (unlikely(i >= svq->vring.num)) {
@@ -447,11 +459,12 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
return;
}
- elem = vhost_svq_get_buf(svq, &len);
- if (!elem) {
+ svq_elem = vhost_svq_get_buf(svq, &len);
+ if (vhost_svq_is_empty_elem(svq_elem)) {
break;
}
+ elem = g_steal_pointer(&svq_elem.elem);
virtqueue_fill(vq, elem, len, i++);
}
@@ -594,7 +607,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
memset(svq->vring.desc, 0, driver_size);
svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
memset(svq->vring.used, 0, device_size);
- svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
+ svq->ring_id_maps = g_new0(SVQElement, svq->vring.num);
svq->desc_next = g_new0(uint16_t, svq->vring.num);
for (unsigned i = 0; i < svq->vring.num - 1; i++) {
svq->desc_next[i] = cpu_to_le16(i + 1);
@@ -619,7 +632,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
for (unsigned i = 0; i < svq->vring.num; ++i) {
g_autofree VirtQueueElement *elem = NULL;
- elem = g_steal_pointer(&svq->ring_id_maps[i]);
+ elem = g_steal_pointer(&svq->ring_id_maps[i].elem);
if (elem) {
virtqueue_detach_element(svq->vq, elem, 0);
}
--
2.31.1
Thread overview: 25+ messages
2022-07-08 10:49 [PATCH 00/22] vdpa net devices Rx filter change notification with Shadow VQ Eugenio Pérez
2022-07-08 10:49 ` [PATCH 01/22] vhost: Return earlier if used buffers overrun Eugenio Pérez
2022-07-08 10:49 ` [PATCH 02/22] vhost: move descriptor translation to vhost_svq_vring_write_descs Eugenio Pérez
2022-07-08 10:49 ` [PATCH 03/22] vdpa: Clean vhost_vdpa_dev_start(dev, false) Eugenio Pérez
2022-07-08 10:49 ` [PATCH 04/22] virtio-net: Expose ctrl virtqueue logic Eugenio Pérez
2022-07-08 10:49 ` [PATCH 05/22] vhost: Decouple vhost_svq_add_split from VirtQueueElement Eugenio Pérez
2022-07-08 10:49 ` [PATCH 06/22] vhost: Reorder vhost_svq_last_desc_of_chain Eugenio Pérez
2022-07-08 10:49 ` Eugenio Pérez [this message]
2022-07-08 10:49 ` [PATCH 08/22] vhost: Move last chain id to SVQ element Eugenio Pérez
2022-07-08 10:50 ` [PATCH 09/22] vhost: Add opaque member to SVQElement Eugenio Pérez
2022-07-08 10:50 ` [PATCH 10/22] vdpa: Small rename of error labels Eugenio Pérez
2022-07-08 10:50 ` [PATCH 11/22] vhost: add vhost_svq_push_elem Eugenio Pérez
2022-07-08 10:50 ` [PATCH 12/22] vhost: Add vhost_svq_inject Eugenio Pérez
2022-07-08 10:50 ` [PATCH 13/22] vhost: add vhost_svq_poll Eugenio Pérez
2022-07-08 10:50 ` [PATCH 14/22] vhost: Add custom used buffer callback Eugenio Pérez
2022-07-08 10:50 ` [PATCH 15/22] vhost: Add svq avail_handler callback Eugenio Pérez
2022-07-08 10:50 ` [PATCH 16/22] vhost: add detach SVQ operation Eugenio Pérez
2022-07-08 10:50 ` [PATCH 17/22] vdpa: Export vhost_vdpa_dma_map and unmap calls Eugenio Pérez
2022-07-08 10:50 ` [PATCH 18/22] vdpa: manual forward CVQ buffers Eugenio Pérez
2022-07-08 10:50 ` [PATCH 19/22] vdpa: Buffer CVQ support on shadow virtqueue Eugenio Pérez
2022-07-08 10:50 ` [PATCH 20/22] vdpa: Extract get features part from vhost_vdpa_get_max_queue_pairs Eugenio Pérez
2022-07-08 10:50 ` [PATCH 21/22] vdpa: Add device migration blocker Eugenio Pérez
2022-07-08 10:50 ` [PATCH 22/22] vdpa: Add x-svq to NetdevVhostVDPAOptions Eugenio Pérez
2022-07-08 12:52 ` Markus Armbruster
2022-07-11 7:17 ` Eugenio Perez Martin