From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
Paolo Bonzini <pbonzini@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>
Subject: [Qemu-devel] [PULL 08/23] virtio: use MemoryRegionCache to access descriptors
Date: Fri, 17 Feb 2017 21:54:20 +0200
Message-ID: <1487361200-29966-9-git-send-email-mst@redhat.com>
In-Reply-To: <1487361200-29966-1-git-send-email-mst@redhat.com>

From: Paolo Bonzini <pbonzini@redhat.com>
For now, the cache is created on every virtqueue_pop. Later on,
direct descriptors will be able to reuse it.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
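A minimal sketch of the usage pattern this converts to (not from the
patch itself: the helper name and the linear loop are illustrative,
the memory API calls are the ones the hunks below introduce). The
cache is initialized once over the whole descriptor table, each
per-descriptor read goes through the cache, and teardown is one
unconditional destroy, which is safe even when init failed or never
ran, thanks to the MEMORY_REGION_CACHE_INVALID initializer plus the
idempotent destroy from patch 07/23:

static int process_descriptors(VirtIODevice *vdev, VirtQueue *vq)
{
    MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
    VRingDesc desc;
    int64_t len;

    /* Map the whole descriptor table once up front. */
    len = address_space_cache_init(&cache, vdev->dma_as, vq->vring.desc,
                                   vq->vring.num * sizeof(VRingDesc),
                                   false);
    if (len < vq->vring.num * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto out;
    }

    /* Every per-descriptor read is now a cheap cached access. */
    for (unsigned int i = 0; i < vq->vring.num; i++) {
        vring_desc_read(vdev, &desc, &cache, i);
        /* ... process desc ... */
    }

out:
    /*
     * Unconditional: destroying a cache that was never initialized,
     * or whose init came up short, is a no-op here.
     */
    address_space_cache_destroy(&cache);
    return 0;
}

For now virtqueue_pop still pays the init/destroy cost on every call;
patch 09/23 in this series adds a MemoryListener so the caches can be
kept across calls.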
include/exec/memory.h | 2 ++
hw/virtio/virtio.c | 80 +++++++++++++++++++++++++--------------------------
2 files changed, 41 insertions(+), 41 deletions(-)

diff --git a/include/exec/memory.h b/include/exec/memory.h
index 987f925..6911023 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -1426,6 +1426,8 @@ struct MemoryRegionCache {
bool is_write;
};
+#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mr = NULL })
+
/* address_space_cache_init: prepare for repeated access to a physical
* memory region
*
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 6ce6a26..71e41f6 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -120,9 +120,10 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
}
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
- uint8_t *desc_ptr, int i)
+ MemoryRegionCache *cache, int i)
{
- memcpy(desc, desc_ptr + i * sizeof(VRingDesc), sizeof(VRingDesc));
+ address_space_read_cached(cache, i * sizeof(VRingDesc),
+ desc, sizeof(VRingDesc));
virtio_tswap64s(vdev, &desc->addr);
virtio_tswap32s(vdev, &desc->len);
virtio_tswap16s(vdev, &desc->flags);
@@ -407,7 +408,7 @@ enum {
};
static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
- void *desc_ptr, unsigned int max,
+ MemoryRegionCache *desc_cache, unsigned int max,
unsigned int *next)
{
/* If this descriptor says it doesn't chain, we're done. */
@@ -425,7 +426,7 @@ static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
return VIRTQUEUE_READ_DESC_ERROR;
}
- vring_desc_read(vdev, desc, desc_ptr, *next);
+ vring_desc_read(vdev, desc, desc_cache, *next);
return VIRTQUEUE_READ_DESC_MORE;
}
@@ -436,24 +437,25 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
VirtIODevice *vdev = vq->vdev;
unsigned int max, idx;
unsigned int total_bufs, in_total, out_total;
- void *vring_desc_ptr;
- void *indirect_desc_ptr = NULL;
- hwaddr len = 0;
+ MemoryRegionCache vring_desc_cache;
+ MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
+ int64_t len = 0;
int rc;
idx = vq->last_avail_idx;
total_bufs = in_total = out_total = 0;
max = vq->vring.num;
- len = max * sizeof(VRingDesc);
- vring_desc_ptr = address_space_map(vdev->dma_as, vq->vring.desc, &len, false);
+ len = address_space_cache_init(&vring_desc_cache, vdev->dma_as,
+ vq->vring.desc, max * sizeof(VRingDesc),
+ false);
if (len < max * sizeof(VRingDesc)) {
virtio_error(vdev, "Cannot map descriptor ring");
goto err;
}
while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
- void *desc_ptr = vring_desc_ptr;
+ MemoryRegionCache *desc_cache = &vring_desc_cache;
unsigned int num_bufs;
VRingDesc desc;
unsigned int i;
@@ -464,10 +466,9 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
goto err;
}
- vring_desc_read(vdev, &desc, desc_ptr, i);
+ vring_desc_read(vdev, &desc, desc_cache, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
- len = desc.len;
if (desc.len % sizeof(VRingDesc)) {
virtio_error(vdev, "Invalid size for indirect buffer table");
goto err;
@@ -480,9 +481,10 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
}
/* loop over the indirect descriptor table */
- indirect_desc_ptr = address_space_map(vdev->dma_as, desc.addr,
- &len, false);
- desc_ptr = indirect_desc_ptr;
+ len = address_space_cache_init(&indirect_desc_cache,
+ vdev->dma_as,
+ desc.addr, desc.len, false);
+ desc_cache = &indirect_desc_cache;
if (len < desc.len) {
virtio_error(vdev, "Cannot map indirect buffer");
goto err;
@@ -490,7 +492,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
max = desc.len / sizeof(VRingDesc);
num_bufs = i = 0;
- vring_desc_read(vdev, &desc, desc_ptr, i);
+ vring_desc_read(vdev, &desc, desc_cache, i);
}
do {
@@ -509,16 +511,15 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
goto done;
}
- rc = virtqueue_read_next_desc(vdev, &desc, desc_ptr, max, &i);
+ rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
} while (rc == VIRTQUEUE_READ_DESC_MORE);
if (rc == VIRTQUEUE_READ_DESC_ERROR) {
goto err;
}
- if (desc_ptr == indirect_desc_ptr) {
- address_space_unmap(vdev->dma_as, desc_ptr, len, false, 0);
- indirect_desc_ptr = NULL;
+ if (desc_cache == &indirect_desc_cache) {
+ address_space_cache_destroy(&indirect_desc_cache);
total_bufs++;
} else {
total_bufs = num_bufs;
@@ -530,10 +531,8 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
}
done:
- if (indirect_desc_ptr) {
- address_space_unmap(vdev->dma_as, indirect_desc_ptr, len, false, 0);
- }
- address_space_unmap(vdev->dma_as, vring_desc_ptr, len, false, 0);
+ address_space_cache_destroy(&indirect_desc_cache);
+ address_space_cache_destroy(&vring_desc_cache);
if (in_bytes) {
*in_bytes = in_total;
}
@@ -673,10 +672,10 @@ static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_nu
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
unsigned int i, head, max;
- void *vring_desc_ptr;
- void *indirect_desc_ptr = NULL;
- void *desc_ptr;
- hwaddr len;
+ MemoryRegionCache vring_desc_cache;
+ MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
+ MemoryRegionCache *desc_cache;
+ int64_t len;
VirtIODevice *vdev = vq->vdev;
VirtQueueElement *elem = NULL;
unsigned out_num, in_num;
@@ -715,15 +714,16 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
i = head;
- len = max * sizeof(VRingDesc);
- vring_desc_ptr = address_space_map(vdev->dma_as, vq->vring.desc, &len, false);
+ len = address_space_cache_init(&vring_desc_cache, vdev->dma_as,
+ vq->vring.desc, max * sizeof(VRingDesc),
+ false);
if (len < max * sizeof(VRingDesc)) {
virtio_error(vdev, "Cannot map descriptor ring");
goto done;
}
- desc_ptr = vring_desc_ptr;
- vring_desc_read(vdev, &desc, desc_ptr, i);
+ desc_cache = &vring_desc_cache;
+ vring_desc_read(vdev, &desc, desc_cache, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (desc.len % sizeof(VRingDesc)) {
virtio_error(vdev, "Invalid size for indirect buffer table");
@@ -731,9 +731,9 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
}
/* loop over the indirect descriptor table */
- len = desc.len;
- indirect_desc_ptr = address_space_map(vdev->dma_as, desc.addr, &len, false);
- desc_ptr = indirect_desc_ptr;
+ len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
+ desc.addr, desc.len, false);
+ desc_cache = &indirect_desc_cache;
if (len < desc.len) {
virtio_error(vdev, "Cannot map indirect buffer");
goto done;
@@ -741,7 +741,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
max = desc.len / sizeof(VRingDesc);
i = 0;
- vring_desc_read(vdev, &desc, desc_ptr, i);
+ vring_desc_read(vdev, &desc, desc_cache, i);
}
/* Collect all the descriptors */
@@ -772,7 +772,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
goto err_undo_map;
}
- rc = virtqueue_read_next_desc(vdev, &desc, desc_ptr, max, &i);
+ rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
} while (rc == VIRTQUEUE_READ_DESC_MORE);
if (rc == VIRTQUEUE_READ_DESC_ERROR) {
@@ -795,10 +795,8 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
- if (indirect_desc_ptr) {
- address_space_unmap(vdev->dma_as, indirect_desc_ptr, len, false, 0);
- }
- address_space_unmap(vdev->dma_as, vring_desc_ptr, len, false, 0);
+ address_space_cache_destroy(&indirect_desc_cache);
+ address_space_cache_destroy(&vring_desc_cache);
return elem;
--
MST