From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: stefanha@redhat.com, famz@redhat.com, mst@redhat.com,
borntraeger@de.ibm.com
Subject: [Qemu-devel] [PATCH 07/11] virtio: use address_space_map/unmap to access descriptors
Date: Mon, 12 Dec 2016 12:18:53 +0100
Message-ID: <20161212111857.23399-8-pbonzini@redhat.com>
In-Reply-To: <20161212111857.23399-1-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
hw/virtio/virtio.c | 78 +++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 60 insertions(+), 18 deletions(-)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 568f4be..459f9dd 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -119,10 +119,9 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
}
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
- hwaddr desc_pa, int i)
+ uint8_t *desc_ptr, int i)
{
- address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc),
- MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
+ memcpy(desc, desc_ptr + i * sizeof(VRingDesc), sizeof(VRingDesc));
virtio_tswap64s(vdev, &desc->addr);
virtio_tswap32s(vdev, &desc->len);
virtio_tswap16s(vdev, &desc->flags);
@@ -405,7 +404,7 @@ enum {
};
static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
- hwaddr desc_pa, unsigned int max,
+ void *desc_ptr, unsigned int max,
unsigned int *next)
{
/* If this descriptor says it doesn't chain, we're done. */
@@ -423,7 +422,7 @@ static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
return VIRTQUEUE_READ_DESC_ERROR;
}
- vring_desc_read(vdev, desc, desc_pa, *next);
+ vring_desc_read(vdev, desc, desc_ptr, *next);
return VIRTQUEUE_READ_DESC_MORE;
}
@@ -433,6 +432,8 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
{
unsigned int idx;
unsigned int total_bufs, in_total, out_total;
+ void *desc_ptr = NULL;
+ hwaddr len = 0;
int rc;
idx = vq->last_avail_idx;
@@ -442,7 +443,6 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
VirtIODevice *vdev = vq->vdev;
unsigned int max, num_bufs, indirect = 0;
VRingDesc desc;
- hwaddr desc_pa;
unsigned int i;
max = vq->vring.num;
@@ -452,10 +452,19 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
goto err;
}
- desc_pa = vq->vring.desc;
- vring_desc_read(vdev, &desc, desc_pa, i);
+ len = max * sizeof(VRingDesc);
+ desc_ptr = address_space_map(&address_space_memory, vq->vring.desc,
+ &len, false);
+ if (len < max * sizeof(VRingDesc)) {
+ virtio_error(vdev, "Cannot map descriptor ring");
+ goto err;
+ }
+
+ vring_desc_read(vdev, &desc, desc_ptr, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
+ address_space_unmap(&address_space_memory, desc_ptr, len, false, 0);
+ len = desc.len;
if (desc.len % sizeof(VRingDesc)) {
virtio_error(vdev, "Invalid size for indirect buffer table");
goto err;
@@ -468,11 +477,17 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
}
/* loop over the indirect descriptor table */
+ desc_ptr = address_space_map(&address_space_memory, desc.addr,
+ &len, false);
+ if (len < desc.len) {
+ virtio_error(vdev, "Cannot map indirect buffer");
+ goto err;
+ }
+
indirect = 1;
max = desc.len / sizeof(VRingDesc);
- desc_pa = desc.addr;
num_bufs = i = 0;
- vring_desc_read(vdev, &desc, desc_pa, i);
+ vring_desc_read(vdev, &desc, desc_ptr, i);
}
do {
@@ -491,7 +506,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
goto done;
}
- rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
+ rc = virtqueue_read_next_desc(vdev, &desc, desc_ptr, max, &i);
} while (rc == VIRTQUEUE_READ_DESC_MORE);
if (rc == VIRTQUEUE_READ_DESC_ERROR) {
@@ -509,6 +524,9 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
}
done:
+ if (desc_ptr) {
+ address_space_unmap(&address_space_memory, desc_ptr, len, false, 0);
+ }
if (in_bytes) {
*in_bytes = in_total;
}
@@ -656,9 +674,10 @@ static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_nu
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
unsigned int i, head, max;
- hwaddr desc_pa = vq->vring.desc;
+ void *desc_ptr = NULL;
+ hwaddr len;
VirtIODevice *vdev = vq->vdev;
- VirtQueueElement *elem;
+ VirtQueueElement *elem = NULL;
unsigned out_num, in_num;
hwaddr addr[VIRTQUEUE_MAX_SIZE];
struct iovec iov[VIRTQUEUE_MAX_SIZE];
@@ -694,18 +713,35 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
}
i = head;
- vring_desc_read(vdev, &desc, desc_pa, i);
+
+ len = max * sizeof(VRingDesc);
+ desc_ptr = address_space_map(&address_space_memory, vq->vring.desc, &len,
+ false);
+ if (len < max * sizeof(VRingDesc)) {
+ virtio_error(vdev, "Cannot map descriptor ring");
+ return NULL;
+ }
+
+ vring_desc_read(vdev, &desc, desc_ptr, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
+ address_space_unmap(&address_space_memory, desc_ptr, len, false, 0);
if (desc.len % sizeof(VRingDesc)) {
virtio_error(vdev, "Invalid size for indirect buffer table");
return NULL;
}
/* loop over the indirect descriptor table */
+ len = desc.len;
+ desc_ptr = address_space_map(&address_space_memory, desc.addr,
+ &len, false);
+ if (len < desc.len) {
+ virtio_error(vdev, "Cannot map indirect buffer");
+ return NULL;
+ }
+
max = desc.len / sizeof(VRingDesc);
- desc_pa = desc.addr;
i = 0;
- vring_desc_read(vdev, &desc, desc_pa, i);
+ vring_desc_read(vdev, &desc, desc_ptr, i);
}
/* Collect all the descriptors */
@@ -736,7 +772,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
goto err_undo_map;
}
- rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
+ rc = virtqueue_read_next_desc(vdev, &desc, desc_ptr, max, &i);
} while (rc == VIRTQUEUE_READ_DESC_MORE);
if (rc == VIRTQUEUE_READ_DESC_ERROR) {
@@ -758,11 +794,17 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
vq->inuse++;
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
+done:
+ if (desc_ptr) {
+ address_space_unmap(&address_space_memory, desc_ptr, len, false, 0);
+ }
+
return elem;
err_undo_map:
virtqueue_undo_map_desc(out_num, in_num, iov);
- return NULL;
+ elem = NULL;
+ goto done;
}
/* Reading and writing a structure directly to QEMUFile is *awful*, but
--
1.8.3.1
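For readers less familiar with the map/unmap API: the patch replaces the per-descriptor address_space_read() calls with a single address_space_map() of the descriptor ring (or of the indirect descriptor table, for VRING_DESC_F_INDIRECT chains), reads each descriptor out of the mapped pointer via memcpy in vring_desc_read(), and releases the mapping with address_space_unmap() once the chain walk is done. The sketch below is illustrative only and is not part of the patch; walk_desc_ring_example is a made-up helper name, and the VRingDesc, VirtQueue, vring_desc_read() and virtio_error() definitions are assumed to come from hw/virtio/virtio.c:

/* Illustrative sketch only -- mirrors the map / length-check / read / unmap
 * sequence introduced by the patch.
 */
static bool walk_desc_ring_example(VirtIODevice *vdev, VirtQueue *vq,
                                   unsigned int head, unsigned int max)
{
    hwaddr len = max * sizeof(VRingDesc);
    VRingDesc desc;
    void *desc_ptr;

    /* Map the descriptor ring read-only (is_write = false). */
    desc_ptr = address_space_map(&address_space_memory, vq->vring.desc,
                                 &len, false);
    if (!desc_ptr || len < max * sizeof(VRingDesc)) {
        /* The mapping may come back shorter than requested (e.g. when the
         * range crosses a MemoryRegion boundary); treat a short mapping the
         * same as a failed one.
         */
        virtio_error(vdev, "Cannot map descriptor ring");
        return false;
    }

    /* vring_desc_read() is now a memcpy from the mapped pointer plus the
     * usual virtio_tswap*() byteswaps.
     */
    vring_desc_read(vdev, &desc, desc_ptr, head);

    /* ... walk the rest of the chain with virtqueue_read_next_desc() ... */

    /* Nothing was written through the mapping, so is_write = false and
     * access_len = 0 on unmap.
     */
    address_space_unmap(&address_space_memory, desc_ptr, len, false, 0);
    return true;
}

This is also why both call sites in the patch compare the length returned by address_space_map() against the requested size before using the pointer: the mapping can legitimately be truncated, and a truncated descriptor ring must be reported via virtio_error() rather than read past its end.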
Thread overview: 27+ messages
2016-12-12 11:18 [Qemu-devel] [RFC PATCH 00/11] speedup vring processing with MemoryRegionCaches Paolo Bonzini
2016-12-12 11:18 ` [Qemu-devel] [PATCH 01/11] exec: optimize remaining address_space_* cases Paolo Bonzini
2016-12-12 13:27 ` Stefan Hajnoczi
2016-12-12 11:18 ` [Qemu-devel] [PATCH 02/11] exec: introduce memory_ldst.inc.c Paolo Bonzini
2016-12-12 13:44 ` Stefan Hajnoczi
2016-12-12 11:18 ` [Qemu-devel] [PATCH 03/11] exec: introduce address_space_extend_translation Paolo Bonzini
2016-12-12 13:47 ` Stefan Hajnoczi
2016-12-12 11:18 ` [Qemu-devel] [PATCH 04/11] exec: introduce MemoryRegionCache Paolo Bonzini
2016-12-12 14:06 ` Stefan Hajnoczi
2016-12-13 13:14 ` Paolo Bonzini
2016-12-12 11:18 ` [Qemu-devel] [PATCH 05/11] virtio: make virtio_should_notify static Paolo Bonzini
2016-12-12 14:07 ` Stefan Hajnoczi
2016-12-12 11:18 ` [Qemu-devel] [PATCH 06/11] virtio: add virtio_*_phys_cached Paolo Bonzini
2016-12-12 14:08 ` Stefan Hajnoczi
2016-12-12 11:18 ` Paolo Bonzini [this message]
2016-12-12 14:12 ` [Qemu-devel] [PATCH 07/11] virtio: use address_space_map/unmap to access descriptors Stefan Hajnoczi
2016-12-12 11:18 ` [Qemu-devel] [PATCH 08/11] virtio: use MemoryRegionCache " Paolo Bonzini
2016-12-12 14:17 ` Stefan Hajnoczi
2016-12-13 11:14 ` Paolo Bonzini
2016-12-12 11:18 ` [Qemu-devel] [PATCH 09/11] virtio: add MemoryListener to cache ring translations Paolo Bonzini
2016-12-12 14:24 ` Stefan Hajnoczi
2016-12-12 11:18 ` [Qemu-devel] [PATCH 10/11] virtio: use VRingMemoryRegionCaches for descriptor ring Paolo Bonzini
2016-12-12 16:06 ` Stefan Hajnoczi
2016-12-12 11:18 ` [Qemu-devel] [PATCH 11/11] virtio: use VRingMemoryRegionCaches for avail and used rings Paolo Bonzini
2016-12-12 16:08 ` Stefan Hajnoczi
2016-12-12 16:11 ` [Qemu-devel] [RFC PATCH 00/11] speedup vring processing with MemoryRegionCaches Stefan Hajnoczi
2016-12-13 12:56 ` Christian Borntraeger