From: Sahil Siddiq <icegambit91@gmail.com>
To: eperezma@redhat.com, sgarzare@redhat.com
Cc: mst@redhat.com, qemu-devel@nongnu.org, icegambit91@gmail.com,
Sahil Siddiq <sahilcdq@proton.me>
Subject: [RFC v3 3/3] vhost: Allocate memory for packed vring
Date: Fri, 2 Aug 2024 16:51:38 +0530 [thread overview]
Message-ID: <20240802112138.46831-4-sahilcdq@proton.me> (raw)
In-Reply-To: <20240802112138.46831-1-sahilcdq@proton.me>
Allocate memory for the packed vq format and support
packed vq in the SVQ "start" and "stop" operations.
Signed-off-by: Sahil Siddiq <sahilcdq@proton.me>
---
Changes v2 -> v3:
* vhost-shadow-virtqueue.c
(vhost_svq_memory_packed): New function
(vhost_svq_start):
- Move common variables out of the if-else branch.
(vhost_svq_stop):
- Add support for packed vq.
(vhost_svq_get_vring_addr): Revert changes
(vhost_svq_get_vring_addr_packed): Likewise.
* vhost-shadow-virtqueue.h
- Revert changes made to "vhost_svq_get_vring_addr*"
functions.
* vhost-vdpa.c: Revert changes.
hw/virtio/vhost-shadow-virtqueue.c | 56 +++++++++++++++++++++++-------
hw/virtio/vhost-shadow-virtqueue.h | 4 +++
2 files changed, 47 insertions(+), 13 deletions(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 4c308ee53d..f4285db2b4 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -645,6 +645,8 @@ void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd)
/**
* Get the shadow vq vring address.
+ * This is used irrespective of whether the
+ * split or packed vq format is used.
* @svq: Shadow virtqueue
* @addr: Destination to store address
*/
@@ -672,6 +674,16 @@ size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
return ROUND_UP(used_size, qemu_real_host_page_size());
}
+size_t vhost_svq_memory_packed(const VhostShadowVirtqueue *svq)
+{
+ size_t desc_size = sizeof(struct vring_packed_desc) * svq->num_free;
+ size_t driver_event_suppression = sizeof(struct vring_packed_desc_event);
+ size_t device_event_suppression = sizeof(struct vring_packed_desc_event);
+
+ return ROUND_UP(desc_size + driver_event_suppression + device_event_suppression,
+ qemu_real_host_page_size());
+}
+
/**
* Set a new file descriptor for the guest to kick the SVQ and notify for avail
*
@@ -726,17 +738,30 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
svq->num_free = svq->vring.num;
- svq->vring.desc = mmap(NULL, vhost_svq_driver_area_size(svq),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
- -1, 0);
- desc_size = sizeof(vring_desc_t) * svq->vring.num;
- svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
- svq->vring.used = mmap(NULL, vhost_svq_device_area_size(svq),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
- -1, 0);
- svq->desc_state = g_new0(SVQDescState, svq->vring.num);
- svq->desc_next = g_new0(uint16_t, svq->vring.num);
- for (unsigned i = 0; i < svq->vring.num - 1; i++) {
+ svq->is_packed = virtio_vdev_has_feature(svq->vdev, VIRTIO_F_RING_PACKED);
+
+ if (virtio_vdev_has_feature(svq->vdev, VIRTIO_F_RING_PACKED)) {
+ svq->vring_packed.vring.desc = mmap(NULL, vhost_svq_memory_packed(svq),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0);
+ desc_size = sizeof(struct vring_packed_desc) * svq->vring.num;
+ svq->vring_packed.vring.driver = (void *)((char *)svq->vring_packed.vring.desc + desc_size);
+ svq->vring_packed.vring.device = (void *)((char *)svq->vring_packed.vring.driver +
+ sizeof(struct vring_packed_desc_event));
+ } else {
+ svq->vring.desc = mmap(NULL, vhost_svq_driver_area_size(svq),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0);
+ desc_size = sizeof(vring_desc_t) * svq->vring.num;
+ svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
+ svq->vring.used = mmap(NULL, vhost_svq_device_area_size(svq),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0);
+ }
+
+ svq->desc_state = g_new0(SVQDescState, svq->num_free);
+ svq->desc_next = g_new0(uint16_t, svq->num_free);
+ for (unsigned i = 0; i < svq->num_free - 1; i++) {
svq->desc_next[i] = cpu_to_le16(i + 1);
}
}
@@ -776,8 +801,13 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
svq->vq = NULL;
g_free(svq->desc_next);
g_free(svq->desc_state);
- munmap(svq->vring.desc, vhost_svq_driver_area_size(svq));
- munmap(svq->vring.used, vhost_svq_device_area_size(svq));
+
+ if (svq->is_packed) {
+ munmap(svq->vring_packed.vring.desc, vhost_svq_memory_packed(svq));
+ } else {
+ munmap(svq->vring.desc, vhost_svq_driver_area_size(svq));
+ munmap(svq->vring.used, vhost_svq_device_area_size(svq));
+ }
event_notifier_set_handler(&svq->hdev_call, NULL);
}
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index ee1a87f523..03b722a186 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -67,6 +67,9 @@ struct vring_packed {
/* Shadow virtqueue to relay notifications */
typedef struct VhostShadowVirtqueue {
+ /* True if packed virtqueue */
+ bool is_packed;
+
/* Virtio queue shadowing */
VirtQueue *vq;
@@ -150,6 +153,7 @@ void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
struct vhost_vring_addr *addr);
size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
+size_t vhost_svq_memory_packed(const VhostShadowVirtqueue *svq);
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
VirtQueue *vq, VhostIOVATree *iova_tree);
--
2.45.2
next prev parent reply other threads:[~2024-08-02 11:23 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-08-02 11:21 [RFC v3 0/3] Add packed virtqueue to shadow virtqueue Sahil Siddiq
2024-08-02 11:21 ` [RFC v3 1/3] vhost: Introduce packed vq and add buffer elements Sahil Siddiq
2024-08-07 16:40 ` Eugenio Perez Martin
2024-08-02 11:21 ` [RFC v3 2/3] vhost: Data structure changes to support packed vqs Sahil Siddiq
2024-08-02 11:21 ` Sahil Siddiq [this message]
2024-08-07 16:22 ` [RFC v3 3/3] vhost: Allocate memory for packed vring Eugenio Perez Martin
2024-08-11 15:37 ` Sahil
2024-08-11 17:20 ` Sahil
2024-08-12 6:31 ` Eugenio Perez Martin
2024-08-12 19:32 ` Sahil
2024-08-13 6:53 ` Eugenio Perez Martin
2024-08-21 12:19 ` Sahil
2024-08-27 15:30 ` Eugenio Perez Martin
2024-08-30 10:20 ` Sahil
2024-08-30 10:48 ` Eugenio Perez Martin
2024-09-08 19:46 ` Sahil
2024-09-09 12:34 ` Eugenio Perez Martin
2024-09-11 19:36 ` Sahil
2024-09-12 9:54 ` Eugenio Perez Martin
2024-09-16 4:34 ` Sahil
2024-09-24 5:31 ` Sahil
2024-09-24 10:46 ` Eugenio Perez Martin
2024-09-30 5:34 ` Sahil
2024-10-28 5:37 ` Sahil Siddiq
2024-10-28 8:10 ` Eugenio Perez Martin
2024-10-31 5:10 ` Sahil Siddiq
2024-11-13 5:10 ` Sahil Siddiq
2024-11-13 11:30 ` Eugenio Perez Martin
2024-12-05 20:38 ` Sahil Siddiq
2024-08-07 16:41 ` [RFC v3 0/3] Add packed virtqueue to shadow virtqueue Eugenio Perez Martin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240802112138.46831-4-sahilcdq@proton.me \
--to=icegambit91@gmail.com \
--cc=eperezma@redhat.com \
--cc=mst@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=sahilcdq@proton.me \
--cc=sgarzare@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).