From: wexu@redhat.com
To: qemu-devel@nongnu.org
Cc: wexu@redhat.com, jasowang@redhat.com, mst@redhat.com,
	jfreiman@redhat.com, maxime.coquelin@redhat.com,
	tiwei.bie@intel.com
Subject: [Qemu-devel] [PATCH v4 06/11] virtio: get avail bytes check for packed ring
Date: Wed, 13 Feb 2019 23:26:37 -0500
Message-ID: <1550118402-4057-7-git-send-email-wexu@redhat.com>
In-Reply-To: <1550118402-4057-1-git-send-email-wexu@redhat.com>

From: Wei Xu <wexu@redhat.com>

Add the available-bytes (headcount) check for the packed ring:
virtqueue_get_avail_bytes() now dispatches to a split or packed helper
depending on VIRTIO_F_RING_PACKED.

The parts common to the split and packed rings are kept unchanged.
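
Availability in the packed ring is decided from two per-descriptor flag
bits plus the queue's wrap counter. As a reference for the loop below, a
minimal sketch of that test (the real helper used below, is_desc_avail(),
comes from the descriptor-check helpers patch 05/11; the name and exact
signature here are illustrative only):

    /* Bit positions per the VIRTIO 1.1 packed-ring layout;
     * assumes <stdint.h> and <stdbool.h>. */
    #define VRING_PACKED_DESC_F_AVAIL  7
    #define VRING_PACKED_DESC_F_USED   15

    /* A descriptor is available when its AVAIL bit differs from its USED
     * bit and the AVAIL bit matches the driver's current wrap counter. */
    static bool packed_desc_avail_sketch(uint16_t flags, bool wrap_counter)
    {
        bool avail = flags & (1 << VRING_PACKED_DESC_F_AVAIL);
        bool used  = flags & (1 << VRING_PACKED_DESC_F_USED);

        return (avail != used) && (avail == wrap_counter);
    }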

Signed-off-by: Wei Xu <wexu@redhat.com>
---
 hw/virtio/virtio.c | 197 ++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 179 insertions(+), 18 deletions(-)
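
Callers do not change: device models keep using the same entry points,
which now pick the split or packed scan internally. A rough usage sketch
(not part of this patch; 'vq' and 'bufsize' stand for whatever the device
model already has at hand, and UINT_MAX from <limits.h> simply means "no
early cut-off"):

    unsigned int in_bytes, out_bytes;

    /* Total device-writable/-readable bytes currently queued by the driver. */
    virtqueue_get_avail_bytes(vq, &in_bytes, &out_bytes, UINT_MAX, UINT_MAX);

    /* Or only ask whether at least 'bufsize' writable bytes are queued,
     * roughly the way virtio-net gates its receive path. */
    if (!virtqueue_avail_bytes(vq, bufsize, 0)) {
        return 0;   /* not enough buffers yet */
    }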

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index f2ff980..832287b 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -368,6 +368,17 @@ int virtio_queue_ready(VirtQueue *vq)
     return vq->vring.avail != 0;
 }
 
+static void vring_packed_desc_read(VirtIODevice *vdev, VRingPackedDesc *desc,
+                            MemoryRegionCache *cache, int i)
+{
+    address_space_read_cached(cache, i * sizeof(VRingPackedDesc),
+                              desc, sizeof(VRingPackedDesc));
+    virtio_tswap16s(vdev, &desc->flags);
+    virtio_tswap64s(vdev, &desc->addr);
+    virtio_tswap32s(vdev, &desc->len);
+    virtio_tswap16s(vdev, &desc->id);
+}
+
 static void vring_packed_desc_read_flags(VirtIODevice *vdev,
                     VRingPackedDesc *desc, MemoryRegionCache *cache, int i)
 {
@@ -667,9 +678,9 @@ static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
     return VIRTQUEUE_READ_DESC_MORE;
 }
 
-void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
-                               unsigned int *out_bytes,
-                               unsigned max_in_bytes, unsigned max_out_bytes)
+static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
+                            unsigned int *in_bytes, unsigned int *out_bytes,
+                            unsigned max_in_bytes, unsigned max_out_bytes)
 {
     VirtIODevice *vdev = vq->vdev;
     unsigned int max, idx;
@@ -679,27 +690,12 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
     int64_t len = 0;
     int rc;
 
-    if (unlikely(!vq->vring.desc)) {
-        if (in_bytes) {
-            *in_bytes = 0;
-        }
-        if (out_bytes) {
-            *out_bytes = 0;
-        }
-        return;
-    }
-
     rcu_read_lock();
     idx = vq->last_avail_idx;
     total_bufs = in_total = out_total = 0;
 
     max = vq->vring.num;
     caches = vring_get_region_caches(vq);
-    if (caches->desc.len < max * sizeof(VRingDesc)) {
-        virtio_error(vdev, "Cannot map descriptor ring");
-        goto err;
-    }
-
     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
         MemoryRegionCache *desc_cache = &caches->desc;
         unsigned int num_bufs;
@@ -792,6 +788,171 @@ err:
     goto done;
 }
 
+static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
+                            unsigned int *in_bytes, unsigned int *out_bytes,
+                            unsigned max_in_bytes, unsigned max_out_bytes)
+{
+    VirtIODevice *vdev = vq->vdev;
+    unsigned int max, idx;
+    unsigned int total_bufs, in_total, out_total;
+    MemoryRegionCache *desc_cache;
+    VRingMemoryRegionCaches *caches;
+    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
+    int64_t len = 0;
+    VRingPackedDesc desc;
+    bool wrap_counter;
+
+    rcu_read_lock();
+    idx = vq->last_avail_idx;
+    wrap_counter = vq->last_avail_wrap_counter;
+    total_bufs = in_total = out_total = 0;
+
+    max = vq->vring.num;
+    caches = vring_get_region_caches(vq);
+    desc_cache = &caches->desc;
+    vring_packed_desc_read_flags(vdev, &desc, desc_cache, idx);
+    while (is_desc_avail(&desc, wrap_counter)) {
+        unsigned int num_bufs;
+        unsigned int i = 0;
+
+        num_bufs = total_bufs;
+
+        /* Make sure the flags have been read before all the other fields. */
+        smp_rmb();
+        vring_packed_desc_read(vdev, &desc, desc_cache, idx);
+
+        if (desc.flags & VRING_DESC_F_INDIRECT) {
+            if (desc.len % sizeof(VRingPackedDesc)) {
+                virtio_error(vdev, "Invalid size for indirect buffer table");
+                goto err;
+            }
+
+            /* If we've got too many, that implies a descriptor loop. */
+            if (num_bufs >= max) {
+                virtio_error(vdev, "Looped descriptor");
+                goto err;
+            }
+
+            /* loop over the indirect descriptor table */
+            len = address_space_cache_init(&indirect_desc_cache,
+                                           vdev->dma_as,
+                                           desc.addr, desc.len, false);
+            desc_cache = &indirect_desc_cache;
+            if (len < desc.len) {
+                virtio_error(vdev, "Cannot map indirect buffer");
+                goto err;
+            }
+
+            max = desc.len / sizeof(VRingPackedDesc);
+            num_bufs = i = 0;
+            vring_packed_desc_read(vdev, &desc, desc_cache, i);
+        }
+
+        do {
+            /* If we've got too many, that implies a descriptor loop. */
+            if (++num_bufs > max) {
+                virtio_error(vdev, "Looped descriptor");
+                goto err;
+            }
+
+            if (desc.flags & VRING_DESC_F_WRITE) {
+                in_total += desc.len;
+            } else {
+                out_total += desc.len;
+            }
+            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
+                goto done;
+            }
+
+            if (desc_cache == &indirect_desc_cache) {
+                if (++i >= max) {
+                    break;
+                }
+                vring_packed_desc_read(vdev, &desc, desc_cache, i);
+            } else {
+                if (++idx >= vq->vring.num) {
+                    idx -= vq->vring.num;
+                    wrap_counter ^= 1;
+                }
+                vring_packed_desc_read(vdev, &desc, desc_cache, idx);
+            }
+            /* Make sure the flags have been read after all the other fields. */
+            smp_rmb();
+        } while (desc.flags & VRING_DESC_F_NEXT);
+
+        if (desc_cache == &indirect_desc_cache) {
+            address_space_cache_destroy(&indirect_desc_cache);
+            total_bufs++;
+            /* The main-ring slot holding the indirect descriptor was not
+             * advanced in the loop above, so step past it here. */
+            if (++idx >= vq->vring.num) {
+                idx -= vq->vring.num;
+                wrap_counter ^= 1;
+            }
+        } else {
+            total_bufs = num_bufs;
+        }
+        max = vq->vring.num;  /* may have been clobbered by an indirect table */
+        desc_cache = &caches->desc;
+        vring_packed_desc_read_flags(vdev, &desc, desc_cache, idx);
+    }
+
+    /* Record the index and wrap counter for the next kick we may want. */
+    vq->shadow_avail_idx = idx;
+    vq->avail_wrap_counter = wrap_counter;
+done:
+    address_space_cache_destroy(&indirect_desc_cache);
+    if (in_bytes) {
+        *in_bytes = in_total;
+    }
+    if (out_bytes) {
+        *out_bytes = out_total;
+    }
+    rcu_read_unlock();
+    return;
+
+err:
+    in_total = out_total = 0;
+    goto done;
+}
+
+void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
+                               unsigned int *out_bytes,
+                               unsigned max_in_bytes, unsigned max_out_bytes)
+{
+    uint16_t desc_size;
+    VRingMemoryRegionCaches *caches;
+
+    if (unlikely(!vq->vring.desc)) {
+        goto err;
+    }
+
+    caches = vring_get_region_caches(vq);
+    desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
+                                sizeof(VRingPackedDesc) : sizeof(VRingDesc);
+    if (caches->desc.len < vq->vring.num * desc_size) {
+        virtio_error(vq->vdev, "Cannot map descriptor ring");
+        goto err;
+    }
+
+    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+        virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
+                                         max_in_bytes, max_out_bytes);
+    } else {
+        virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
+                                        max_in_bytes, max_out_bytes);
+    }
+
+    return;
+err:
+    if (in_bytes) {
+        *in_bytes = 0;
+    }
+    if (out_bytes) {
+        *out_bytes = 0;
+    }
+}
+
 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                           unsigned int out_bytes)
 {
-- 
1.8.3.1

Thread overview: 41+ messages
2019-02-14  4:26 [Qemu-devel] [PATCH v4 00/11] packed ring virtio-net backends support wexu
2019-02-14  4:26 ` [Qemu-devel] [PATCH v4 01/11] virtio: rename structure for packed ring wexu
2019-02-14  4:26 ` [Qemu-devel] [PATCH v4 02/11] virtio: device/driver area size calculation helper for split ring wexu
2019-02-14  4:26 ` [Qemu-devel] [PATCH v4 03/11] virtio: initialize packed ring region wexu
2019-02-14  4:26 ` [Qemu-devel] [PATCH v4 04/11] virtio: initialize wrap counter for packed ring wexu
2019-02-14  4:26 ` [Qemu-devel] [PATCH v4 05/11] virtio: queue/descriptor check helpers " wexu
2019-02-14  4:26 ` wexu [this message]
2019-02-18  7:27   ` [Qemu-devel] [PATCH v4 06/11] virtio: get avail bytes check " Jason Wang
2019-02-18 17:07     ` Wei Xu
2019-02-19  6:24       ` Jason Wang
2019-02-19  8:24         ` Wei Xu
2019-02-14  4:26 ` [Qemu-devel] [PATCH v4 07/11] virtio: fill/flush/pop " wexu
2019-02-18  7:51   ` Jason Wang
2019-02-18 14:46     ` Wei Xu
2019-02-19  6:49       ` Jason Wang
2019-02-19  8:21         ` Wei Xu
2019-02-19  9:33           ` Jason Wang
2019-02-19 11:34             ` Wei Xu
2019-02-14  4:26 ` [Qemu-devel] [PATCH v4 08/11] virtio: event suppression support " wexu
2019-02-19  7:19   ` Jason Wang
2019-02-19 10:40     ` Wei Xu
2019-02-19 13:06       ` Jason Wang
2019-02-20  2:17         ` Wei Xu
2019-02-14  4:26 ` [Qemu-devel] [PATCH v4 09/11] virtio-net: update the head descriptor in a chain lastly wexu
2019-02-19  7:23   ` Jason Wang
2019-02-19 10:51     ` Wei Xu
2019-02-19 13:09       ` Jason Wang
2019-02-20  1:54         ` Wei Xu
2019-02-20  2:34           ` Jason Wang
2019-02-20  4:01             ` Wei Xu
2019-02-20  7:53               ` Jason Wang
2019-02-14  4:26 ` [Qemu-devel] [PATCH v4 10/11] virtio: migration support for packed ring wexu
2019-02-19  7:30   ` Jason Wang
2019-02-19 11:00     ` Wei Xu
2019-02-19 13:12       ` Jason Wang
2019-02-14  4:26 ` [Qemu-devel] [PATCH v4 11/11] virtio: CLI and provide packed ring feature bit by default wexu
2019-02-19  7:32   ` Jason Wang
2019-02-19 11:23     ` Wei Xu
2019-02-19 13:33       ` Jason Wang
2019-02-20  0:46         ` Wei Xu
2019-02-19  7:35 ` [Qemu-devel] [PATCH v4 00/11] packed ring virtio-net backends support Jason Wang
