From: Paolo Bonzini <pbonzini@redhat.com>
Date: Mon, 8 Apr 2013 13:29:55 +0200
Message-Id: <1365420597-5506-3-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1365420597-5506-1-git-send-email-pbonzini@redhat.com>
References: <1365420597-5506-1-git-send-email-pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH 2/4] migration: use a single I/O operation when writev_buffer is not defined
To: qemu-devel@nongnu.org
Cc: kwolf@redhat.com, owasserm@redhat.com, quintela@redhat.com

The recent patches to use vectored I/O for RAM migration caused a
regression in savevm speed.  To restore previous performance, add data
to the buffer in qemu_put_buffer_async whenever writev_buffer is not
available in the QEMUFile.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 savevm.c | 49 ++++++++++++++++++++++++++++++++++---------------
 1 file changed, 34 insertions(+), 15 deletions(-)

diff --git a/savevm.c b/savevm.c
index c952c41..b32e0a7 100644
--- a/savevm.c
+++ b/savevm.c
@@ -525,27 +525,24 @@ static void qemu_file_set_error(QEMUFile *f, int ret)
 static void qemu_fflush(QEMUFile *f)
 {
     ssize_t ret = 0;
-    int i = 0;
 
     if (!f->ops->writev_buffer && !f->ops->put_buffer) {
         return;
     }
 
-    if (f->is_write && f->iovcnt > 0) {
+    if (f->is_write) {
         if (f->ops->writev_buffer) {
-            ret = f->ops->writev_buffer(f->opaque, f->iov, f->iovcnt);
-            if (ret >= 0) {
-                f->pos += ret;
+            if (f->iovcnt > 0) {
+                ret = f->ops->writev_buffer(f->opaque, f->iov, f->iovcnt);
             }
         } else {
-            for (i = 0; i < f->iovcnt && ret >= 0; i++) {
-                ret = f->ops->put_buffer(f->opaque, f->iov[i].iov_base, f->pos,
-                                         f->iov[i].iov_len);
-                if (ret >= 0) {
-                    f->pos += ret;
-                }
+            if (f->buf_index > 0) {
+                ret = f->ops->put_buffer(f->opaque, f->buf, f->pos, f->buf_index);
             }
         }
+        if (ret >= 0) {
+            f->pos += ret;
+        }
         f->buf_index = 0;
         f->iovcnt = 0;
     }
@@ -640,6 +637,11 @@ static void add_to_iovec(QEMUFile *f, const uint8_t *buf, int size)
 
 void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, int size)
 {
+    if (!f->ops->writev_buffer) {
+        qemu_put_buffer(f, buf, size);
+        return;
+    }
+
     if (f->last_error) {
         return;
     }
@@ -673,8 +675,17 @@ void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, int size)
         if (l > size)
             l = size;
         memcpy(f->buf + f->buf_index, buf, l);
-        f->buf_index += l;
-        qemu_put_buffer_async(f, f->buf + (f->buf_index - l), l);
+        f->bytes_xfer += size;
+        if (f->ops->writev_buffer) {
+            add_to_iovec(f, f->buf + f->buf_index, l);
+            f->buf_index += l;
+        } else {
+            f->is_write = 1;
+            f->buf_index += l;
+            if (f->buf_index == IO_BUF_SIZE) {
+                qemu_fflush(f);
+            }
+        }
         if (qemu_file_get_error(f)) {
             break;
         }
@@ -697,8 +708,16 @@ void qemu_put_byte(QEMUFile *f, int v)
 
     f->buf[f->buf_index] = v;
     f->bytes_xfer++;
-    add_to_iovec(f, f->buf + f->buf_index, 1);
-    f->buf_index++;
+    if (f->ops->writev_buffer) {
+        add_to_iovec(f, f->buf + f->buf_index, 1);
+        f->buf_index++;
+    } else {
+        f->is_write = 1;
+        f->buf_index++;
+        if (f->buf_index == IO_BUF_SIZE) {
+            qemu_fflush(f);
+        }
+    }
 }
 
 static void qemu_file_skip(QEMUFile *f, int size)
-- 
1.8.2
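
For readers who want to experiment with the idea outside of QEMU, here is a
minimal standalone sketch of the pattern the patch above adopts when
writev_buffer is missing: small writes are coalesced into one linear buffer
and flushed with a single put_buffer-style call, instead of issuing one I/O
operation per iovec element.  All names here (ToyFile, TOY_BUF_SIZE, the
toy_* functions, and the fwrite backend) are hypothetical illustrations,
not QEMU API.

/*
 * Toy model of the buffered fallback path: coalesce writes into one
 * buffer, flush with a single backend call.  Hypothetical names only.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define TOY_BUF_SIZE 4096

typedef struct ToyFile {
    uint8_t buf[TOY_BUF_SIZE];
    size_t buf_index;           /* bytes currently queued in buf */
    size_t pos;                 /* stream offset of the next flush */
    FILE *out;                  /* backend, standing in for f->opaque */
} ToyFile;

/* One I/O operation per flush, mirroring the new qemu_fflush() branch. */
static void toy_fflush(ToyFile *f)
{
    if (f->buf_index > 0) {
        fwrite(f->buf, 1, f->buf_index, f->out);
        f->pos += f->buf_index;
        f->buf_index = 0;
    }
}

/* Append data, flushing only when the internal buffer fills up,
 * the same shape as the new else-branch in qemu_put_buffer(). */
static void toy_put_buffer(ToyFile *f, const uint8_t *buf, size_t size)
{
    while (size > 0) {
        size_t l = TOY_BUF_SIZE - f->buf_index;
        if (l > size) {
            l = size;
        }
        memcpy(f->buf + f->buf_index, buf, l);
        f->buf_index += l;
        if (f->buf_index == TOY_BUF_SIZE) {
            toy_fflush(f);
        }
        buf += l;
        size -= l;
    }
}

int main(void)
{
    ToyFile f = { .out = stdout };
    /* Many small writes become a single fwrite() at flush time. */
    for (int i = 0; i < 8; i++) {
        toy_put_buffer(&f, (const uint8_t *)"chunk ", 6);
    }
    toy_fflush(&f);
    return 0;
}

The design point, as the subject line says, is that a backend put_buffer
call has a fixed per-call cost, so one large write is cheaper than a
separate write per iovec element; the iovec machinery only pays off when
the backend can actually consume a vector via writev_buffer.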