From: Sahil Siddiq <icegambit91@gmail.com>
To: eperezma@redhat.com, sgarzare@redhat.com
Cc: mst@redhat.com, qemu-devel@nongnu.org, Sahil Siddiq <sahilcdq@proton.me>
Subject: [RFC v2 1/3] vhost: Introduce packed vq and add buffer elements
Date: Fri, 26 Jul 2024 15:28:20 +0530 [thread overview]
Message-ID: <20240726095822.104017-2-sahilcdq@proton.me> (raw)
In-Reply-To: <20240726095822.104017-1-sahilcdq@proton.me>
This is the first patch in a series to add support for packed
virtqueues in vhost_shadow_virtqueue. This patch implements the
insertion of available buffers in the descriptor area. It takes
into account descriptor chains, but does not consider indirect
descriptors.
Signed-off-by: Sahil Siddiq <sahilcdq@proton.me>
---
Changes v1 -> v2:
* Split commit from RFC v1 into two commits.
* vhost-shadow-virtqueue.c
(vhost_svq_add_packed):
- Merge with "vhost_svq_vring_write_descs_packed()"
- Remove "num == 0" check
hw/virtio/vhost-shadow-virtqueue.c | 93 +++++++++++++++++++++++++++++-
1 file changed, 92 insertions(+), 1 deletion(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index fc5f408f77..c7b7e0c477 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -217,6 +217,91 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
return true;
}
+/*
+ * Write one element (out_sg followed by in_sg) into the SVQ packed vring
+ * descriptor area. One descriptor is written per iovec entry; entries are
+ * chained with VRING_DESC_F_NEXT. Indirect descriptors are not used.
+ *
+ * @svq: shadow virtqueue backed by a packed vring
+ * @out_sg / @out_num: device-readable buffers
+ * @in_sg / @in_num: device-writable buffers (tagged VRING_DESC_F_WRITE)
+ * @head: out parameter, ring index of the first descriptor of the chain
+ *
+ * Returns false if the element has no descriptors or if address
+ * translation fails; returns true on success.
+ */
+static bool vhost_svq_add_packed(VhostShadowVirtqueue *svq,
+ const struct iovec *out_sg, size_t out_num,
+ const struct iovec *in_sg, size_t in_num,
+ unsigned *head)
+{
+ bool ok;
+ uint16_t head_flags = 0;
+ /* One translated address per iovec entry, out_sg first then in_sg */
+ g_autofree hwaddr *sgs = g_new(hwaddr, out_num + in_num);
+
+ /* First free slot in the descriptor ring; chain starts here */
+ *head = svq->vring_packed.next_avail_idx;
+
+ /* We need some descriptors here */
+ if (unlikely(!out_num && !in_num)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Guest provided element with no descriptors");
+ return false;
+ }
+
+ uint16_t id, curr, i;
+ unsigned n;
+ struct vring_packed_desc *descs = svq->vring_packed.vring.desc;
+
+ i = *head;
+ /* Buffer ID stamped on every descriptor of the chain */
+ id = svq->free_head;
+ curr = id;
+
+ size_t num = out_num + in_num;
+
+ /* Translate iovec base addresses into SVQ-visible addresses */
+ ok = vhost_svq_translate_addr(svq, sgs, out_sg, out_num);
+ if (unlikely(!ok)) {
+ return false;
+ }
+
+ ok = vhost_svq_translate_addr(svq, sgs + out_num, in_sg, in_num);
+ if (unlikely(!ok)) {
+ return false;
+ }
+
+ /* Write descriptors to SVQ packed vring */
+ for (n = 0; n < num; n++) {
+ /*
+ * avail_used_flags carries the current AVAIL/USED wrap bits;
+ * in_sg entries are device-writable, and every descriptor but
+ * the last continues the chain.
+ */
+ uint16_t flags = cpu_to_le16(svq->vring_packed.avail_used_flags |
+ (n < out_num ? 0 : VRING_DESC_F_WRITE) |
+ (n + 1 == num ? 0 : VRING_DESC_F_NEXT));
+ if (i == *head) {
+ /*
+ * Defer the head descriptor's flags: publishing them makes
+ * the whole chain visible to the device, so it must happen
+ * last (after the barrier below).
+ */
+ head_flags = flags;
+ } else {
+ descs[i].flags = flags;
+ }
+
+ descs[i].addr = cpu_to_le64(sgs[n]);
+ /*
+ * NOTE(review): addr/len/flags go through cpu_to_le*() but id is
+ * stored unconverted — confirm the intended endianness handling.
+ */
+ descs[i].id = id;
+ if (n < out_num) {
+ descs[i].len = cpu_to_le32(out_sg[n].iov_len);
+ } else {
+ descs[i].len = cpu_to_le32(in_sg[n - out_num].iov_len);
+ }
+
+ /*
+ * Advance through the free-descriptor list; cpu_to_le16() here
+ * mirrors the split-queue code. NOTE(review): desc_next holds
+ * host-order values — verify the conversion direction.
+ */
+ curr = cpu_to_le16(svq->desc_next[curr]);
+
+ /* Ring index wrapped: flip both wrap bits for later descriptors */
+ if (++i >= svq->vring_packed.vring.num) {
+ i = 0;
+ svq->vring_packed.avail_used_flags ^=
+ 1 << VRING_PACKED_DESC_F_AVAIL |
+ 1 << VRING_PACKED_DESC_F_USED;
+ }
+ }
+
+ /* The chain wrapped past the end of the ring: toggle wrap counter */
+ if (i <= *head) {
+ svq->vring_packed.avail_wrap_counter ^= 1;
+ }
+
+ svq->vring_packed.next_avail_idx = i;
+ /* New head of the free-descriptor list */
+ svq->free_head = curr;
+
+ /*
+ * A driver MUST NOT make the first descriptor in the list
+ * available before all subsequent descriptors comprising
+ * the list are made available.
+ */
+ smp_wmb();
+ /* Publish the head flags, making the whole chain device-visible */
+ svq->vring_packed.vring.desc[*head].flags = head_flags;
+
+ return true;
+}
+
static void vhost_svq_kick(VhostShadowVirtqueue *svq)
{
bool needs_kick;
@@ -258,7 +343,13 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
return -ENOSPC;
}
- ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
+ if (virtio_vdev_has_feature(svq->vdev, VIRTIO_F_RING_PACKED)) {
+ ok = vhost_svq_add_packed(svq, out_sg, out_num,
+ in_sg, in_num, &qemu_head);
+ } else {
+ ok = vhost_svq_add_split(svq, out_sg, out_num,
+ in_sg, in_num, &qemu_head);
+ }
if (unlikely(!ok)) {
return -EINVAL;
}
--
2.45.2
next prev parent reply other threads:[~2024-07-26 9:59 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-07-26 9:58 [RFC v2 0/3] Add packed virtqueue to shadow virtqueue Sahil Siddiq
2024-07-26 9:58 ` Sahil Siddiq [this message]
2024-07-26 13:48 ` [RFC v2 1/3] vhost: Introduce packed vq and add buffer elements Eugenio Perez Martin
2024-07-28 17:37 ` Sahil
2024-07-29 8:21 ` Eugenio Perez Martin
2024-08-02 11:26 ` Sahil
2024-07-26 9:58 ` [RFC v2 2/3] vhost: Data structure changes to support packed vqs Sahil Siddiq
2024-07-26 9:58 ` [RFC v2 3/3] vhost: Allocate memory for packed vring Sahil Siddiq
2024-07-26 14:28 ` Eugenio Perez Martin
2024-07-28 13:41 ` Sahil
2024-07-26 13:40 ` [RFC v2 0/3] Add packed virtqueue to shadow virtqueue Eugenio Perez Martin
2024-07-26 17:11 ` Sahil
2024-07-26 18:25 ` Eugenio Perez Martin
2024-07-28 16:42 ` Sahil
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240726095822.104017-2-sahilcdq@proton.me \
--to=icegambit91@gmail.com \
--cc=eperezma@redhat.com \
--cc=mst@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=sahilcdq@proton.me \
--cc=sgarzare@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).