qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: wexu@redhat.com
To: wexu@redhat.com, jasowang@redhat.com, mst@redhat.com,
	tiwei.bie@intel.com, jfreimann@redhat.com, qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH 2/8] virtio: memory cache for packed ring
Date: Wed,  4 Apr 2018 20:53:58 +0800	[thread overview]
Message-ID: <1522846444-31725-3-git-send-email-wexu@redhat.com> (raw)
In-Reply-To: <1522846444-31725-1-git-send-email-wexu@redhat.com>

From: Wei Xu <wexu@redhat.com>

A new memory cache is introduced for the packed ring.
The code looks largely duplicated with the split (1.0) ring —
any ideas for refactoring?

Signed-off-by: Wei Xu <wexu@redhat.com>
---
 hw/virtio/virtio.c | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 76 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 9a6bfe7..73a35a4 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -155,13 +155,15 @@ static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
         return;
     }
 
+    /* FIX ME: pass in 1.1 device here, reuse 1.0 fields at current */
+
     address_space_cache_destroy(&caches->desc);
     address_space_cache_destroy(&caches->avail);
     address_space_cache_destroy(&caches->used);
     g_free(caches);
 }
 
-static void virtio_init_region_cache(VirtIODevice *vdev, int n)
+static void virtio_init_region_cache_split(VirtIODevice *vdev, int n)
 {
     VirtQueue *vq = &vdev->vq[n];
     VRingMemoryRegionCaches *old = vq->vring.caches;
@@ -215,6 +217,65 @@ err_desc:
     g_free(new);
 }
 
+/* Map the packed-ring descriptor table and the driver/device event
+ * suppression areas into memory-region caches for queue n.  Mirrors
+ * virtio_init_region_cache_split() for the 1.1 ring layout; on any
+ * mapping failure, unwinds the caches already created. */
+static void virtio_init_region_cache_packed(VirtIODevice *vdev, int n)
+{
+    VirtQueue *vq = &vdev->vq[n];
+    VRingMemoryRegionCaches *old = vq->vring.caches;
+    VRingMemoryRegionCaches *new;
+    hwaddr addr, size;
+    int64_t len;
+
+    addr = vq->packed.desc;
+    if (!addr) {
+        return;
+    }
+    new = g_new0(VRingMemoryRegionCaches, 1);
+    size = virtio_queue_get_desc_size(vdev, n);
+    len = address_space_cache_init(&new->desc_packed, vdev->dma_as,
+                                   addr, size, false);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map desc");
+        goto err_desc;
+    }
+
+    size = sizeof(struct VRingPackedDescEvent);
+    len = address_space_cache_init(&new->driver, vdev->dma_as,
+                                   vq->packed.driver, size, true);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map driver area");
+        goto err_driver;
+    }
+
+    len = address_space_cache_init(&new->device, vdev->dma_as,
+                                   vq->packed.device, size, true);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map device area");
+        goto err_device;
+    }
+
+    atomic_rcu_set(&vq->packed.caches, new);
+    if (old) {
+        call_rcu(old, virtio_free_region_cache, rcu);
+    }
+    return;
+
+err_device:
+    address_space_cache_destroy(&new->driver);
+err_driver:
+    address_space_cache_destroy(&new->desc_packed);
+err_desc:
+    g_free(new);
+}
+
+static void virtio_init_region_cache(VirtIODevice *vdev, int n)
+{
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        virtio_init_region_cache_packed(vdev, n);
+    } else {
+        virtio_init_region_cache_split(vdev, n);
+    }
+}
+
 /* virt queue functions */
 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
 {
@@ -245,10 +306,18 @@ static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
 
 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
 {
-    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    VRingMemoryRegionCaches *caches;
+
+    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+        caches = atomic_rcu_read(&vq->packed.caches);
+    } else {
+        caches = atomic_rcu_read(&vq->vring.caches);
+    }
+
     assert(caches != NULL);
     return caches;
 }
+
 /* Called within rcu_read_lock().  */
 static inline uint16_t vring_avail_flags(VirtQueue *vq)
 {
@@ -2331,7 +2400,11 @@ hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
 
 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
 {
-    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return sizeof(VRingDescPacked) * vdev->vq[n].packed.num;
+    } else {
+        return sizeof(VRingDesc) * vdev->vq[n].vring.num;
+    }
 }
 
 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
-- 
2.7.4

  parent reply	other threads:[~2018-04-04 12:55 UTC|newest]

Thread overview: 29+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-04-04 12:53 [Qemu-devel] [RFC PATCH 0/8] virtio-net 1.1 userspace backend support wexu
2018-04-04 12:53 ` [Qemu-devel] [PATCH 1/8] virtio: feature bit, data structure for packed ring wexu
2018-04-10  7:05   ` Jason Wang
2018-06-03 16:21     ` Wei Xu
2018-04-04 12:53 ` wexu [this message]
2018-04-10  7:06   ` [Qemu-devel] [PATCH 2/8] virtio: memory cache " Jason Wang
2018-04-04 12:53 ` [Qemu-devel] [PATCH 3/8] virtio: add empty check " wexu
2018-04-10  7:23   ` Jason Wang
2018-06-03 17:44     ` Wei Xu
2018-06-04  8:32       ` Jason Wang
2018-04-04 12:54 ` [Qemu-devel] [PATCH 4/8] virtio: add detach element for packed ring(1.1) wexu
2018-04-10  7:32   ` Jason Wang
2018-06-04  1:34     ` Wei Xu
2018-06-04  1:54       ` Michael S. Tsirkin
2018-06-04  9:40         ` Wei Xu
2018-04-04 12:54 ` [Qemu-devel] [PATCH 5/8] virtio: notification tweak for packed ring wexu
2018-04-04 12:54 ` [Qemu-devel] [PATCH 6/8] virtio: flush/push support " wexu
2018-04-11  2:58   ` Jason Wang
2018-04-04 12:54 ` [Qemu-devel] [PATCH 7/8] virtio: get avail bytes check " wexu
2018-04-11  3:03   ` Jason Wang
2018-06-04  6:07     ` Wei Xu
2018-04-04 12:54 ` [Qemu-devel] [PATCH 8/8] virtio: queue pop support " wexu
2018-04-11  2:43   ` Jason Wang
2018-06-04  7:07     ` Wei Xu
2018-04-04 13:11 ` [Qemu-devel] [RFC PATCH 0/8] virtio-net 1.1 userspace backend support no-reply
2018-04-04 13:14 ` no-reply
2018-04-04 13:14 ` no-reply
2018-04-10  3:46 ` Jason Wang
2018-04-11  2:22   ` Wei Xu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1522846444-31725-3-git-send-email-wexu@redhat.com \
    --to=wexu@redhat.com \
    --cc=jasowang@redhat.com \
    --cc=jfreimann@redhat.com \
    --cc=mst@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=tiwei.bie@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).