From: Sahil Siddiq <icegambit91@gmail.com>
To: eperezma@redhat.com, sgarzare@redhat.com
Cc: mst@redhat.com, qemu-devel@nongnu.org, Sahil Siddiq <sahilcdq@proton.me>
Subject: [RFC v4 4/5] vdpa: Allocate memory for svq and map them to vdpa
Date: Fri, 6 Dec 2024 02:04:29 +0530
Message-ID: <20241205203430.76251-5-sahilcdq@proton.me>
In-Reply-To: <20241205203430.76251-1-sahilcdq@proton.me>
Allocate memory for the packed virtqueue format and map it to the vdpa
device.

Since "struct vring" and the "vring" member of "struct vring_packed" have
the same memory layout, the implementations of svq start and svq stop do
not need to differ based on the vq's format.
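
For reference, the two layouts look roughly like this (paraphrased from the
virtio ring headers as an illustrative sketch; the exact field types in the
real headers may differ slightly):

    /* Split format. */
    struct vring {
        unsigned int num;
        vring_desc_t *desc;
        vring_avail_t *avail;
        vring_used_t *used;
    };

    /* Packed format: the same shape, one element count followed by three
     * pointers, so desc/avail/used and desc/driver/device line up in
     * memory. */
    struct vring_packed {
        unsigned int num;
        struct vring_packed_desc *desc;
        struct vring_packed_desc_event *driver;
        struct vring_packed_desc_event *device;
    };

This is why vhost_svq_start() and vhost_svq_stop() below can keep using
svq->vring.desc, svq->vring.avail and svq->vring.used for both formats.
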
Signed-off-by: Sahil Siddiq <sahilcdq@proton.me>
---
Changes v3 -> v4:
- Based on commit #3 of v3.
- vhost-shadow-virtqueue.c
(vhost_svq_memory_packed): Remove function.
(vhost_svq_driver_area_size, vhost_svq_descriptor_area_size): Decouple functions.
(vhost_svq_device_area_size): Rewrite function (see the size note after this changelog).
(vhost_svq_start): Simplify implementation.
(vhost_svq_stop): Unconditionally munmap().
- vhost-shadow-virtqueue.h: New function declaration.
- vhost-vdpa.c
(vhost_vdpa_svq_unmap_rings): Call vhost_vdpa_svq_unmap_ring().
(vhost_vdpa_svq_map_rings): New mappings.
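
A note on the rewritten area-size helpers: in the packed format the driver
and device areas only hold the event suppression structures, which the
virtio spec defines as 4 bytes each; this is where the sizeof(uint32_t) in
vhost_svq_driver_area_size() and vhost_svq_device_area_size() comes from,
before rounding up to the host page size. Sketched roughly (paraphrased
from the virtio headers):

    /* Driver/device event suppression area, packed format. */
    struct vring_packed_desc_event {
        uint16_t off_wrap;  /* event offset and wrap counter */
        uint16_t flags;     /* event flags */
    };                      /* 4 bytes total, i.e. sizeof(uint32_t) */
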
hw/virtio/vhost-shadow-virtqueue.c | 47 ++++++++++++++++++++----------
hw/virtio/vhost-shadow-virtqueue.h | 1 +
hw/virtio/vhost-vdpa.c | 34 +++++++++++++++++----
3 files changed, 60 insertions(+), 22 deletions(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 6eee01ab3c..be06b12c9a 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -314,7 +314,7 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
return -EINVAL;
}
- if (virtio_vdev_has_feature(svq->vdev, VIRTIO_F_RING_PACKED)) {
+ if (svq->is_packed) {
vhost_svq_add_packed(svq, out_sg, out_num, in_sg,
in_num, sgs, &qemu_head);
} else {
@@ -661,19 +661,33 @@ void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
addr->used_user_addr = (uint64_t)(uintptr_t)svq->vring.used;
}
-size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
+size_t vhost_svq_descriptor_area_size(const VhostShadowVirtqueue *svq)
{
size_t desc_size = sizeof(vring_desc_t) * svq->vring.num;
- size_t avail_size = offsetof(vring_avail_t, ring[svq->vring.num]) +
- sizeof(uint16_t);
+ return ROUND_UP(desc_size, qemu_real_host_page_size());
+}
- return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size());
+size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
+{
+ size_t avail_size;
+ if (svq->is_packed) {
+ avail_size = sizeof(uint32_t);
+ } else {
+ avail_size = offsetof(vring_avail_t, ring[svq->vring.num]) +
+ sizeof(uint16_t);
+ }
+ return ROUND_UP(avail_size, qemu_real_host_page_size());
}
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
{
- size_t used_size = offsetof(vring_used_t, ring[svq->vring.num]) +
- sizeof(uint16_t);
+ size_t used_size;
+ if (svq->is_packed) {
+ used_size = sizeof(uint32_t);
+ } else {
+ used_size = offsetof(vring_used_t, ring[svq->vring.num]) +
+ sizeof(uint16_t);
+ }
return ROUND_UP(used_size, qemu_real_host_page_size());
}
@@ -718,8 +732,6 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
VirtQueue *vq, VhostIOVATree *iova_tree)
{
- size_t desc_size;
-
event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
svq->next_guest_avail_elem = NULL;
svq->shadow_avail_idx = 0;
@@ -728,20 +740,22 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
svq->vdev = vdev;
svq->vq = vq;
svq->iova_tree = iova_tree;
+ svq->is_packed = virtio_vdev_has_feature(svq->vdev, VIRTIO_F_RING_PACKED);
svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
svq->num_free = svq->vring.num;
- svq->vring.desc = mmap(NULL, vhost_svq_driver_area_size(svq),
+ svq->vring.desc = mmap(NULL, vhost_svq_descriptor_area_size(svq),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-1, 0);
- desc_size = sizeof(vring_desc_t) * svq->vring.num;
- svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
+ svq->vring.avail = mmap(NULL, vhost_svq_driver_area_size(svq),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0);
svq->vring.used = mmap(NULL, vhost_svq_device_area_size(svq),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-1, 0);
- svq->desc_state = g_new0(SVQDescState, svq->vring.num);
- svq->desc_next = g_new0(uint16_t, svq->vring.num);
- for (unsigned i = 0; i < svq->vring.num - 1; i++) {
+ svq->desc_state = g_new0(SVQDescState, svq->num_free);
+ svq->desc_next = g_new0(uint16_t, svq->num_free);
+ for (unsigned i = 0; i < svq->num_free - 1; i++) {
svq->desc_next[i] = cpu_to_le16(i + 1);
}
}
@@ -781,7 +795,8 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
svq->vq = NULL;
g_free(svq->desc_next);
g_free(svq->desc_state);
- munmap(svq->vring.desc, vhost_svq_driver_area_size(svq));
+ munmap(svq->vring.desc, vhost_svq_descriptor_area_size(svq));
+ munmap(svq->vring.avail, vhost_svq_driver_area_size(svq));
munmap(svq->vring.used, vhost_svq_device_area_size(svq));
event_notifier_set_handler(&svq->hdev_call, NULL);
}
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index ce89bafedc..6c0e0c4f67 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -151,6 +151,7 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
struct vhost_vring_addr *addr);
+size_t vhost_svq_descriptor_area_size(const VhostShadowVirtqueue *svq);
size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 3cdaa12ed5..97ed569792 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -1134,6 +1134,8 @@ static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
+ vhost_vdpa_svq_unmap_ring(v, svq_addr.avail_user_addr);
+
vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
}
@@ -1181,38 +1183,58 @@ static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
Error **errp)
{
ERRP_GUARD();
- DMAMap device_region, driver_region;
+ DMAMap descriptor_region, device_region, driver_region;
struct vhost_vring_addr svq_addr;
struct vhost_vdpa *v = dev->opaque;
+ size_t descriptor_size = vhost_svq_descriptor_area_size(svq);
size_t device_size = vhost_svq_device_area_size(svq);
size_t driver_size = vhost_svq_driver_area_size(svq);
- size_t avail_offset;
bool ok;
vhost_svq_get_vring_addr(svq, &svq_addr);
- driver_region = (DMAMap) {
+ descriptor_region = (DMAMap) {
.translated_addr = svq_addr.desc_user_addr,
+ .size = descriptor_size - 1,
+ .perm = IOMMU_RO,
+ };
+ if (svq->is_packed) {
+ descriptor_region.perm = IOMMU_RW;
+ }
+
+ ok = vhost_vdpa_svq_map_ring(v, &descriptor_region, errp);
+ if (unlikely(!ok)) {
+ error_prepend(errp, "Cannot create vq descriptor region: ");
+ return false;
+ }
+ addr->desc_user_addr = descriptor_region.iova;
+
+ driver_region = (DMAMap) {
+ .translated_addr = svq_addr.avail_user_addr,
.size = driver_size - 1,
.perm = IOMMU_RO,
};
ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
if (unlikely(!ok)) {
error_prepend(errp, "Cannot create vq driver region: ");
+ vhost_vdpa_svq_unmap_ring(v, descriptor_region.translated_addr);
return false;
}
- addr->desc_user_addr = driver_region.iova;
- avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
- addr->avail_user_addr = driver_region.iova + avail_offset;
+ addr->avail_user_addr = driver_region.iova;
device_region = (DMAMap) {
.translated_addr = svq_addr.used_user_addr,
.size = device_size - 1,
.perm = IOMMU_RW,
};
+ if (svq->is_packed) {
+ device_region.perm = IOMMU_WO;
+ }
+
ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
if (unlikely(!ok)) {
error_prepend(errp, "Cannot create vq device region: ");
+ vhost_vdpa_svq_unmap_ring(v, descriptor_region.translated_addr);
vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
}
addr->used_user_addr = device_region.iova;
--
2.47.0