From: Paolo Bonzini
Date: Fri, 16 Sep 2011 16:25:51 +0200
Message-Id: <1316183152-5481-15-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1316183152-5481-1-git-send-email-pbonzini@redhat.com>
References: <1316183152-5481-1-git-send-email-pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH v2 14/15] nbd: split requests
To: qemu-devel@nongnu.org

qemu-nbd has a limit of slightly less than 1M per request.  Work around
this in the nbd block driver.

Signed-off-by: Paolo Bonzini
---
 block/nbd.c |   52 ++++++++++++++++++++++++++++++++++++++++++++++------
 1 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/block/nbd.c b/block/nbd.c
index f6efd7b..25abaf7 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -276,8 +276,9 @@ static int nbd_open(BlockDriverState *bs, const char* filename, int flags)
     return result;
 }
 
-static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
-                        int nb_sectors, QEMUIOVector *qiov)
+static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
+                          int nb_sectors, QEMUIOVector *qiov,
+                          int offset)
 {
     BDRVNBDState *s = bs->opaque;
     struct nbd_request request;
@@ -291,15 +292,16 @@ static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
     if (nbd_co_send_request(s, &request, NULL, 0) == -1) {
         reply.error = errno;
     } else {
-        nbd_co_receive_reply(s, &request, &reply, qiov->iov, 0);
+        nbd_co_receive_reply(s, &request, &reply, qiov->iov, offset);
     }
     nbd_coroutine_end(s, &request);
     return -reply.error;
 }
 
-static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
-                         int nb_sectors, QEMUIOVector *qiov)
+static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
+                           int nb_sectors, QEMUIOVector *qiov,
+                           int offset)
 {
     BDRVNBDState *s = bs->opaque;
     struct nbd_request request;
@@ -314,7 +316,7 @@ static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
     request.len = nb_sectors * 512;
 
     nbd_coroutine_start(s, &request);
-    if (nbd_co_send_request(s, &request, qiov->iov, 0) == -1) {
+    if (nbd_co_send_request(s, &request, qiov->iov, offset) == -1) {
         reply.error = errno;
     } else {
         nbd_co_receive_reply(s, &request, &reply, NULL, 0);
@@ -323,6 +325,44 @@ static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
     return -reply.error;
 }
 
+/* qemu-nbd has a limit of slightly less than 1M per request.  Try to
+ * remain aligned to 4K. */
+#define NBD_MAX_SECTORS 2040
+
+static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
+                        int nb_sectors, QEMUIOVector *qiov)
+{
+    int offset = 0;
+    int ret;
+    while (nb_sectors > NBD_MAX_SECTORS) {
+        ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
+        if (ret < 0) {
+            return ret;
+        }
+        offset += NBD_MAX_SECTORS * 512;
+        sector_num += NBD_MAX_SECTORS;
+        nb_sectors -= NBD_MAX_SECTORS;
+    }
+    return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
+}
+
+static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
+                         int nb_sectors, QEMUIOVector *qiov)
+{
+    int offset = 0;
+    int ret;
+    while (nb_sectors > NBD_MAX_SECTORS) {
+        ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
+        if (ret < 0) {
+            return ret;
+        }
+        offset += NBD_MAX_SECTORS * 512;
+        sector_num += NBD_MAX_SECTORS;
+        nb_sectors -= NBD_MAX_SECTORS;
+    }
+    return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset);
+}
+
 static int nbd_co_flush(BlockDriverState *bs)
 {
     BDRVNBDState *s = bs->opaque;
-- 
1.7.6
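
For illustration only (not part of the patch): a minimal standalone sketch of
the splitting arithmetic the wrappers above perform.  With NBD_MAX_SECTORS at
2040, each chunk covers 2040 * 512 = 1044480 bytes, which stays under the ~1M
per-request limit and is a multiple of 4096, so chunks keep 4K alignment.  The
5000-sector request size below is an arbitrary, made-up example.

/* Standalone sketch mirroring the splitting loop added in this patch.
 * It prints the chunks a hypothetical 5000-sector request would produce;
 * the request size and starting sector are assumptions for illustration. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define NBD_MAX_SECTORS 2040    /* 2040 * 512 = 1044480 bytes, 4K-aligned */

int main(void)
{
    int64_t sector_num = 0;     /* assumed starting sector */
    int nb_sectors = 5000;      /* assumed request size in sectors */
    int offset = 0;             /* byte offset into the request's buffer */

    while (nb_sectors > NBD_MAX_SECTORS) {
        printf("chunk at sector %" PRId64 ": %d sectors, byte offset %d\n",
               sector_num, NBD_MAX_SECTORS, offset);
        offset += NBD_MAX_SECTORS * 512;
        sector_num += NBD_MAX_SECTORS;
        nb_sectors -= NBD_MAX_SECTORS;
    }
    printf("chunk at sector %" PRId64 ": %d sectors, byte offset %d\n",
           sector_num, nb_sectors, offset);
    return 0;
}

A 5000-sector request would thus go out as three NBD requests of 2040, 2040
and 920 sectors, at byte offsets 0, 1044480 and 2088960 within the caller's
buffer.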