From: Sahil Siddiq <icegambit91@gmail.com>
To: eperezma@redhat.com, sgarzare@redhat.com
Cc: mst@redhat.com, qemu-devel@nongnu.org, Sahil Siddiq <sahilcdq@proton.me>
Subject: [RFC v4 1/5] vhost: Refactor vhost_svq_add_split
Date: Fri, 6 Dec 2024 02:04:26 +0530 [thread overview]
Message-ID: <20241205203430.76251-2-sahilcdq@proton.me> (raw)
In-Reply-To: <20241205203430.76251-1-sahilcdq@proton.me>
This commit refactors vhost_svq_add_split and vhost_svq_add to simplify
their implementation and to prepare for the addition of packed vqs in
the following commits.
Signed-off-by: Sahil Siddiq <sahilcdq@proton.me>
---
Changes v3 -> v4:
- Split commit #1 in v3 into 2 commits.
- Changes related to "vhost_svq_add_packed" are
now in commit #2.
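For reference, here is a condensed outline of the refactored vhost_svq_add()
flow after this patch. It is a sketch of the control flow only: error logging,
the VirtQueueElement/desc_state bookkeeping and the final kick are omitted,
and only names that appear in the patch are used.

    static int vhost_svq_add_outline(VhostShadowVirtqueue *svq,
                                     const struct iovec *out_sg, size_t out_num,
                                     const struct iovec *in_sg, size_t in_num)
    {
        unsigned qemu_head;
        unsigned ndescs = in_num + out_num;

        if (!ndescs) {
            return -EINVAL;     /* guest provided an element with no descriptors */
        }
        if (ndescs > vhost_svq_available_slots(svq)) {
            return -ENOSPC;     /* not enough free descriptors in the SVQ */
        }

        /* Translate guest addresses once, for the whole chain. */
        g_autofree hwaddr *sgs = g_new(hwaddr, ndescs);
        if (!vhost_svq_translate_addr(svq, sgs, out_sg, out_num) ||
            !vhost_svq_translate_addr(svq, sgs + out_num, in_sg, in_num)) {
            return -EINVAL;
        }

        /* Writing the split vring is now a single pass over the chain. */
        vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, sgs, &qemu_head);
        return 0;
    }
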
hw/virtio/vhost-shadow-virtqueue.c | 102 ++++++++++++-----------------
1 file changed, 41 insertions(+), 61 deletions(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 37aca8b431..bb7cf6d5db 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -124,83 +124,48 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
}
/**
- * Write descriptors to SVQ vring
+ * Write descriptors to SVQ split vring
*
* @svq: The shadow virtqueue
- * @sg: Cache for hwaddr
- * @iovec: The iovec from the guest
- * @num: iovec length
- * @more_descs: True if more descriptors come in the chain
- * @write: True if they are writeable descriptors
- *
- * Return true if success, false otherwise and print error.
+ * @out_sg: The iovec to the guest
+ * @out_num: Outgoing iovec length
+ * @in_sg: The iovec from the guest
+ * @in_num: Incoming iovec length
+ * @sgs: Cache for hwaddr
+ * @head: Saves current free_head
*/
-static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
- const struct iovec *iovec, size_t num,
- bool more_descs, bool write)
+static void vhost_svq_add_split(VhostShadowVirtqueue *svq,
+ const struct iovec *out_sg, size_t out_num,
+ const struct iovec *in_sg, size_t in_num,
+ hwaddr *sgs, unsigned *head)
{
+ unsigned avail_idx, n;
uint16_t i = svq->free_head, last = svq->free_head;
- unsigned n;
- uint16_t flags = write ? cpu_to_le16(VRING_DESC_F_WRITE) : 0;
+ vring_avail_t *avail = svq->vring.avail;
vring_desc_t *descs = svq->vring.desc;
- bool ok;
-
- if (num == 0) {
- return true;
- }
+ size_t num = in_num + out_num;
- ok = vhost_svq_translate_addr(svq, sg, iovec, num);
- if (unlikely(!ok)) {
- return false;
- }
+ *head = svq->free_head;
for (n = 0; n < num; n++) {
- if (more_descs || (n + 1 < num)) {
- descs[i].flags = flags | cpu_to_le16(VRING_DESC_F_NEXT);
+ descs[i].flags = cpu_to_le16(n < out_num ? 0 : VRING_DESC_F_WRITE);
+ if (n + 1 < num) {
+ descs[i].flags |= cpu_to_le16(VRING_DESC_F_NEXT);
descs[i].next = cpu_to_le16(svq->desc_next[i]);
+ }
+
+ descs[i].addr = cpu_to_le64(sgs[n]);
+ if (n < out_num) {
+ descs[i].len = cpu_to_le32(out_sg[n].iov_len);
} else {
- descs[i].flags = flags;
+ descs[i].len = cpu_to_le32(in_sg[n - out_num].iov_len);
}
- descs[i].addr = cpu_to_le64(sg[n]);
- descs[i].len = cpu_to_le32(iovec[n].iov_len);
last = i;
i = cpu_to_le16(svq->desc_next[i]);
}
svq->free_head = le16_to_cpu(svq->desc_next[last]);
- return true;
-}
-
-static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
- const struct iovec *out_sg, size_t out_num,
- const struct iovec *in_sg, size_t in_num,
- unsigned *head)
-{
- unsigned avail_idx;
- vring_avail_t *avail = svq->vring.avail;
- bool ok;
- g_autofree hwaddr *sgs = g_new(hwaddr, MAX(out_num, in_num));
-
- *head = svq->free_head;
-
- /* We need some descriptors here */
- if (unlikely(!out_num && !in_num)) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "Guest provided element with no descriptors");
- return false;
- }
-
- ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0,
- false);
- if (unlikely(!ok)) {
- return false;
- }
-
- ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true);
- if (unlikely(!ok)) {
- return false;
- }
/*
* Put the entry in the available array (but don't update avail->idx until
@@ -214,7 +179,6 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
smp_wmb();
avail->idx = cpu_to_le16(svq->shadow_avail_idx);
- return true;
}
static void vhost_svq_kick(VhostShadowVirtqueue *svq)
@@ -254,15 +218,31 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
unsigned ndescs = in_num + out_num;
bool ok;
+ /* We need some descriptors here */
+ if (unlikely(!ndescs)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Guest provided element with no descriptors");
+ return -EINVAL;
+ }
+
if (unlikely(ndescs > vhost_svq_available_slots(svq))) {
return -ENOSPC;
}
- ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
+ g_autofree hwaddr *sgs = g_new(hwaddr, ndescs);
+ ok = vhost_svq_translate_addr(svq, sgs, out_sg, out_num);
if (unlikely(!ok)) {
return -EINVAL;
}
+ ok = vhost_svq_translate_addr(svq, sgs + out_num, in_sg, in_num);
+ if (unlikely(!ok)) {
+ return -EINVAL;
+ }
+
+ vhost_svq_add_split(svq, out_sg, out_num, in_sg,
+ in_num, sgs, &qemu_head);
+
svq->num_free -= ndescs;
svq->desc_state[qemu_head].elem = elem;
svq->desc_state[qemu_head].ndescs = ndescs;
--
2.47.0
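
Not part of the patch: a tiny standalone C sketch illustrating the flag
assignment the new single loop in vhost_svq_add_split() performs. The first
out_num descriptors are device-readable, the remaining in_num descriptors are
device-writable, and every descriptor except the last carries
VRING_DESC_F_NEXT. The constant values below are the virtio split-ring ones;
the example chain sizes are arbitrary.

    #include <stdint.h>
    #include <stdio.h>

    #define VRING_DESC_F_NEXT  1   /* descriptor continues via the next field */
    #define VRING_DESC_F_WRITE 2   /* descriptor is device-writable */

    int main(void)
    {
        size_t out_num = 2, in_num = 1;   /* example: 2 readable + 1 writable */
        size_t num = out_num + in_num;

        for (size_t n = 0; n < num; n++) {
            /* Same expression the patch uses for the split vring. */
            uint16_t flags = n < out_num ? 0 : VRING_DESC_F_WRITE;
            if (n + 1 < num) {
                flags |= VRING_DESC_F_NEXT;   /* chain to the next descriptor */
            }
            printf("desc %zu: flags=0x%x\n", n, flags);
        }
        return 0;
    }

Running this prints flags=0x1 for the two device-readable descriptors and
flags=0x2 for the final device-writable one.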