From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org, qemu-block@nongnu.org
Subject: [Qemu-devel] [PATCH 06/40] virtio: introduce virtqueue_alloc_element
Date: Tue, 24 Nov 2015 19:00:57 +0100
Message-ID: <1448388091-117282-7-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1448388091-117282-1-git-send-email-pbonzini@redhat.com>

Allocate the arrays for in_addr/out_addr/in_sg/out_sg outside the
VirtQueueElement, in the same memory block, at suitably aligned offsets
past the element itself.  For now, virtqueue_pop and vring_pop keep
allocating a very large VirtQueueElement, with room for
VIRTQUEUE_MAX_SIZE entries in each array, so memory usage does not
change yet.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
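Note for reviewers (not part of the commit): below is a self-contained
sketch of the layout that virtqueue_alloc_element() computes, for
playing with the trick outside QEMU.  One allocation holds the element
followed by the four arrays, each placed at an offset rounded up to its
element type's alignment.  ALIGN_UP stands in for QEMU's QEMU_ALIGN_UP,
plain malloc for g_malloc, and the main() caller is made up for
illustration:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/uio.h>

typedef uint64_t hwaddr;

typedef struct VirtQueueElement {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr *in_addr;
    hwaddr *out_addr;
    struct iovec *in_sg;
    struct iovec *out_sg;
} VirtQueueElement;

/* Round n up to a multiple of a; a must be a power of two, which is
 * always the case for an alignment. */
#define ALIGN_UP(n, a) (((n) + (a) - 1) & ~((size_t)(a) - 1))

static void *alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    /* sz is the caller's struct size; the hwaddr arrays go first,
     * then the iovec arrays, each group after one alignment fixup. */
    size_t in_addr_ofs = ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    /* A single allocation covers everything up to the end of out_sg. */
    elem = malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (hwaddr *)((char *)elem + in_addr_ofs);
    elem->out_addr = (hwaddr *)((char *)elem + out_addr_ofs);
    elem->in_sg = (struct iovec *)((char *)elem + in_sg_ofs);
    elem->out_sg = (struct iovec *)((char *)elem + out_sg_ofs);
    return elem;
}

int main(void)
{
    /* A device passes the size of its own request struct, which embeds
     * VirtQueueElement as its first field, so the arrays land past the
     * device-specific fields.  Here we just use the bare element. */
    VirtQueueElement *elem = alloc_element(sizeof(*elem), 4, 2);
    printf("in_addr at +%td, in_sg at +%td\n",
           (char *)elem->in_addr - (char *)elem,
           (char *)elem->in_sg - (char *)elem);
    free(elem);
    return 0;
}

Because the arrays live inside the same block, freeing the element
remains a single g_free/free.
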
 hw/virtio/dataplane/vring.c |  2 +-
 hw/virtio/virtio.c          | 60 +++++++++++++++++++++++++++++++--------------
 include/hw/virtio/virtio.h  |  9 ++++---
 3 files changed, 47 insertions(+), 24 deletions(-)

diff --git a/hw/virtio/dataplane/vring.c b/hw/virtio/dataplane/vring.c
index 1b900fc..c950caa 100644
--- a/hw/virtio/dataplane/vring.c
+++ b/hw/virtio/dataplane/vring.c
@@ -402,7 +402,7 @@ void *vring_pop(VirtIODevice *vdev, Vring *vring, size_t sz)
         goto out;
     }
 
-    elem = g_malloc(sz);
+    elem = virtqueue_alloc_element(sz, VIRTQUEUE_MAX_SIZE, VIRTQUEUE_MAX_SIZE);
 
     /* Initialize elem so it can be safely unmapped */
     elem->in_num = elem->out_num = 0;
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index f5f8108..32c89eb 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -494,11 +494,29 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
 void virtqueue_map(VirtQueueElement *elem)
 {
     virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num,
-                        MIN(ARRAY_SIZE(elem->in_sg), ARRAY_SIZE(elem->in_addr)),
-                        1);
+                        VIRTQUEUE_MAX_SIZE, 1);
     virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num,
-                        MIN(ARRAY_SIZE(elem->out_sg), ARRAY_SIZE(elem->out_addr)),
-                        0);
+                        VIRTQUEUE_MAX_SIZE, 0);
+}
+
+void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
+{
+    VirtQueueElement *elem;
+    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
+    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
+    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
+    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
+    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
+    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
+
+    elem = g_malloc(out_sg_end);
+    elem->out_num = out_num;
+    elem->in_num = in_num;
+    elem->in_addr = (void *)elem + in_addr_ofs;
+    elem->out_addr = (void *)elem + out_addr_ofs;
+    elem->in_sg = (void *)elem + in_sg_ofs;
+    elem->out_sg = (void *)elem + out_sg_ofs;
+    return elem;
 }
 
 void *virtqueue_pop(VirtQueue *vq, size_t sz)
@@ -513,7 +531,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
     }
 
     /* When we start there are none of either input nor output. */
-    elem = g_malloc(sz);
+    elem = virtqueue_alloc_element(sz, VIRTQUEUE_MAX_SIZE, VIRTQUEUE_MAX_SIZE);
     elem->out_num = elem->in_num = 0;
 
     max = vq->vring.num;
@@ -540,14 +558,14 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
         struct iovec *sg;
 
         if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
-            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
+            if (elem->in_num >= VIRTQUEUE_MAX_SIZE) {
                 error_report("Too many write descriptors in indirect table");
                 exit(1);
             }
             elem->in_addr[elem->in_num] = vring_desc_addr(vdev, desc_pa, i);
             sg = &elem->in_sg[elem->in_num++];
         } else {
-            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
+            if (elem->out_num >= VIRTQUEUE_MAX_SIZE) {
                 error_report("Too many read descriptors in indirect table");
                 exit(1);
             }
@@ -577,31 +595,35 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
 
 void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
 {
-    VirtQueueElement *elem = g_malloc(sz);
+    VirtQueueElement *elem;
     bool swap;
     hwaddr addr[VIRTQUEUE_MAX_SIZE];
     struct iovec iov[VIRTQUEUE_MAX_SIZE];
+    uint32_t index, out_num, in_num;
     uint64_t scratch;
     int i;
 
-    qemu_get_be32s(f, &elem->index);
-    qemu_get_be32s(f, &elem->out_num);
-    qemu_get_be32s(f, &elem->in_num);
+    qemu_get_be32s(f, &index);
+    qemu_get_be32s(f, &out_num);
+    qemu_get_be32s(f, &in_num);
 
-    swap = (elem->out_num & 0xFFFF0000) || (elem->in_num & 0xFFFF0000);
+    swap = (out_num & 0xFFFF0000) || (in_num & 0xFFFF0000);
     if (swap) {
-        bswap32s(&elem->index);
-        bswap32s(&elem->out_num);
-        bswap32s(&elem->in_num);
+        bswap32s(&index);
+        bswap32s(&out_num);
+        bswap32s(&in_num);
     }
 
+    elem = virtqueue_alloc_element(sz, out_num, in_num);
+    elem->index = index;
+
     for (i = 0; i < elem->in_num; i++) {
         qemu_get_be64s(f, &elem->in_addr[i]);
         if (swap) {
             bswap64s(&elem->in_addr[i]);
         }
     }
-    if (i < ARRAY_SIZE(addr)) {
+    if (i < VIRTQUEUE_MAX_SIZE) {
         qemu_get_buffer(f, (uint8_t *)addr, sizeof(addr) - i * sizeof(addr[0]));
     }
 
@@ -611,7 +633,7 @@ void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
             bswap64s(&elem->out_addr[i]);
         }
     }
-    if (i < ARRAY_SIZE(addr)) {
+    if (i < VIRTQUEUE_MAX_SIZE) {
         qemu_get_buffer(f, (uint8_t *)addr, sizeof(addr) - i * sizeof(addr[0]));
     }
 
@@ -623,7 +645,7 @@ void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
         }
         elem->in_sg[i].iov_len = scratch;
     }
-    if (i < ARRAY_SIZE(iov)) {
+    if (i < VIRTQUEUE_MAX_SIZE) {
         qemu_get_buffer(f, (uint8_t *)iov, sizeof(iov) - i * sizeof(iov[0]));
     }
 
@@ -635,7 +657,7 @@ void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
         }
         elem->out_sg[i].iov_len = scratch;
     }
-    if (i < ARRAY_SIZE(iov)) {
+    if (i < VIRTQUEUE_MAX_SIZE) {
         qemu_get_buffer(f, (uint8_t *)iov, sizeof(iov) - i * sizeof(iov[0]));
     }
 
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 44da9a8..108cdb0 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -46,10 +46,10 @@ typedef struct VirtQueueElement
     unsigned int index;
     unsigned int out_num;
     unsigned int in_num;
-    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
-    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
-    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
-    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
+    hwaddr *in_addr;
+    hwaddr *out_addr;
+    struct iovec *in_sg;
+    struct iovec *out_sg;
 } VirtQueueElement;
 
 #define VIRTIO_QUEUE_MAX 1024
@@ -143,6 +143,7 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
 
 void virtio_del_queue(VirtIODevice *vdev, int n);
 
+void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num);
 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len);
 void virtqueue_flush(VirtQueue *vq, unsigned int count);
--
1.8.3.1