From: wexu@redhat.com
To: qemu-devel@nongnu.org
Cc: jasowang@redhat.com, tiwei.bie@intel.com, wexu@redhat.com,
	jfreimann@redhat.com, maxime.coquelin@redhat.com
Subject: [Qemu-devel] [RFC v3 03/12] virtio: init memory cache for packed ring
Date: Thu, 11 Oct 2018 10:08:26 -0400	[thread overview]
Message-ID: <1539266915-15216-4-git-send-email-wexu@redhat.com> (raw)
In-Reply-To: <1539266915-15216-1-git-send-email-wexu@redhat.com>

From: Wei Xu <wexu@redhat.com>

Expand the 1.0 (split ring) memory region cache initialization to cover the
packed ring layout. The avail/used size helpers are renamed to driver/device
area helpers: for a packed ring each area holds only the event suppression
structure, while for a split ring the optional event index is now accounted
for inside the helpers instead of in virtio_init_region_cache(). The
descriptor area cache is mapped writable since the device writes descriptors
back into it when the packed ring is in use.
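
As an aside (not part of the patch), a rough worked example of the sizes the
renamed helpers return. The VRingPackedDescEvent layout shown here is an
assumption based on the packed ring event suppression structure introduced
earlier in this series:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout of the packed ring event suppression structure
     * (off_wrap + flags); the real definition lives in the earlier
     * "introduce packed ring definitions" patch of this series. */
    struct VRingPackedDescEvent {
        uint16_t off_wrap;
        uint16_t flags;
    };

    int main(void)
    {
        unsigned num = 256;       /* example queue size */
        int event_idx = 1;        /* VIRTIO_RING_F_EVENT_IDX negotiated */

        /* Split ring: flags + idx + ring[] (+ optional event index). */
        size_t split_driver = 2 + 2 + 2 * num + (event_idx ? 2 : 0); /* 518 */
        size_t split_device = 2 + 2 + 8 * num + (event_idx ? 2 : 0); /* 2054 */

        /* Packed ring: each area is only the event suppression struct,
         * independent of the queue size. */
        size_t packed_area = sizeof(struct VRingPackedDescEvent);    /* 4 */

        printf("split driver %zu, split device %zu, packed %zu\n",
               split_driver, split_device, packed_area);
        return 0;
    }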

Signed-off-by: Wei Xu <wexu@redhat.com>
---
 hw/virtio/vhost.c          | 16 ++++++++--------
 hw/virtio/virtio.c         | 35 +++++++++++++++++++++++------------
 include/hw/virtio/virtio.h |  4 ++--
 3 files changed, 33 insertions(+), 22 deletions(-)

diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 569c405..9df2da3 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -996,14 +996,14 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
         r = -ENOMEM;
         goto fail_alloc_desc;
     }
-    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
+    vq->avail_size = s = l = virtio_queue_get_driver_size(vdev, idx);
     vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
     vq->avail = vhost_memory_map(dev, a, &l, 0);
     if (!vq->avail || l != s) {
         r = -ENOMEM;
         goto fail_alloc_avail;
     }
-    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
+    vq->used_size = s = l = virtio_queue_get_device_size(vdev, idx);
     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
     vq->used = vhost_memory_map(dev, a, &l, 1);
     if (!vq->used || l != s) {
@@ -1051,10 +1051,10 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
 fail_vector:
 fail_kick:
 fail_alloc:
-    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
+    vhost_memory_unmap(dev, vq->used, virtio_queue_get_device_size(vdev, idx),
                        0, 0);
 fail_alloc_used:
-    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
+    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_driver_size(vdev, idx),
                        0, 0);
 fail_alloc_avail:
     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
@@ -1101,10 +1101,10 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                                 vhost_vq_index);
     }
 
-    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
-                       1, virtio_queue_get_used_size(vdev, idx));
-    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
-                       0, virtio_queue_get_avail_size(vdev, idx));
+    vhost_memory_unmap(dev, vq->used, virtio_queue_get_device_size(vdev, idx),
+                       1, virtio_queue_get_device_size(vdev, idx));
+    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_driver_size(vdev, idx),
+                       0, virtio_queue_get_driver_size(vdev, idx));
     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                        0, virtio_queue_get_desc_size(vdev, idx));
 }
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 500eecf..bfb3364 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -162,11 +162,8 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
     VRingMemoryRegionCaches *old = vq->vring.caches;
     VRingMemoryRegionCaches *new = NULL;
     hwaddr addr, size;
-    int event_size;
     int64_t len;
 
-    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
-
     addr = vq->vring.desc;
     if (!addr) {
         goto out_no_cache;
@@ -174,13 +171,13 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
     new = g_new0(VRingMemoryRegionCaches, 1);
     size = virtio_queue_get_desc_size(vdev, n);
     len = address_space_cache_init(&new->desc, vdev->dma_as,
-                                   addr, size, false);
+                                   addr, size, true);
     if (len < size) {
         virtio_error(vdev, "Cannot map desc");
         goto err_desc;
     }
 
-    size = virtio_queue_get_used_size(vdev, n) + event_size;
+    size = virtio_queue_get_device_size(vdev, n);
     len = address_space_cache_init(&new->used, vdev->dma_as,
                                    vq->vring.used, size, true);
     if (len < size) {
@@ -188,7 +185,7 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
         goto err_used;
     }
 
-    size = virtio_queue_get_avail_size(vdev, n) + event_size;
+    size = virtio_queue_get_driver_size(vdev, n);
     len = address_space_cache_init(&new->avail, vdev->dma_as,
                                    vq->vring.avail, size, false);
     if (len < size) {
@@ -2339,16 +2336,30 @@ hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
     return sizeof(VRingDesc) * vdev->vq[n].vring.num;
 }
 
-hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
+hwaddr virtio_queue_get_driver_size(VirtIODevice *vdev, int n)
 {
-    return offsetof(VRingAvail, ring) +
-        sizeof(uint16_t) * vdev->vq[n].vring.num;
+    int s;
+
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return sizeof(struct VRingPackedDescEvent);
+    } else {
+        s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+        return offsetof(VRingAvail, ring) +
+            sizeof(uint16_t) * vdev->vq[n].vring.num + s;
+    }
 }
 
-hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
+hwaddr virtio_queue_get_device_size(VirtIODevice *vdev, int n)
 {
-    return offsetof(VRingUsed, ring) +
-        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
+    int s;
+
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return sizeof(struct VRingPackedDescEvent);
+    } else {
+        s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+        return offsetof(VRingUsed, ring) +
+            sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
+    }
 }
 
 uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 9c1fa07..e323e76 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -270,8 +270,8 @@ hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
-hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n);
-hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n);
+hwaddr virtio_queue_get_driver_size(VirtIODevice *vdev, int n);
+hwaddr virtio_queue_get_device_size(VirtIODevice *vdev, int n);
 uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n);
-- 
1.8.3.1


Thread overview: 36+ messages
2018-10-11 14:08 [Qemu-devel] [RFC v3 00/12] packed ring virtio-net userspace backend support wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 01/12] virtio: introduce packed ring definitions wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 02/12] virtio: redefine structure & memory cache for packed ring wexu
2018-10-15  3:03   ` Jason Wang
2018-10-15  7:26     ` Wei Xu
2018-10-15  8:03       ` Jason Wang
2018-10-11 14:08 ` wexu [this message]
2018-10-15  3:10   ` [Qemu-devel] [[RFC v3 03/12] virtio: init " Jason Wang
2018-10-15  7:09     ` Wei Xu
2018-10-15  7:54       ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 04/12] virtio: init wrap counter " wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 05/12] virtio: init and desc empty check " wexu
2018-10-15  3:18   ` Jason Wang
2018-10-15  7:04     ` Wei Xu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 06/12] virtio: get avail bytes " wexu
2018-10-15  3:47   ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 07/12] virtio: fill/flush/pop " wexu
2018-10-15  6:14   ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 08/12] virtio: event suppression support " wexu
2018-10-15  6:55   ` Jason Wang
2018-10-15  6:59   ` Jason Wang
2018-10-15  8:20     ` Wei Xu
2018-10-15  9:11       ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 09/12] virtio-net: fill head desc after done all in a chain wexu
2018-10-15  7:45   ` Jason Wang
2018-10-15  8:03     ` Wei Xu
2018-10-15  8:05       ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 10/12] virtio: packed ring feature bit for userspace backend wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 11/12] virtio: enable packed ring via a new command line wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 12/12] virtio: feature vhost-net support for packed ring wexu
2018-10-15  7:50   ` Jason Wang
2018-10-15  8:11     ` Wei Xu
2018-11-21 13:03   ` Maxime Coquelin
2018-11-22  3:46     ` Wei Xu
2018-11-21 14:39 ` [Qemu-devel] [RFC v3 00/12] packed ring virtio-net userspace backend support Tiwei Bie
2018-11-22  3:43   ` Wei Xu
