From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from eggs.gnu.org ([2001:4830:134:3::10]:56886) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1YvirB-0004WV-0V for qemu-devel@nongnu.org; Fri, 22 May 2015 05:03:10 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1Yvir9-0004uG-Lo for qemu-devel@nongnu.org; Fri, 22 May 2015 05:03:04 -0400 Received: from mx1.redhat.com ([209.132.183.28]:39219) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1Yvir9-0004tu-Bj for qemu-devel@nongnu.org; Fri, 22 May 2015 05:03:03 -0400 From: Stefan Hajnoczi Date: Fri, 22 May 2015 10:01:54 +0100 Message-Id: <1432285330-13994-23-git-send-email-stefanha@redhat.com> In-Reply-To: <1432285330-13994-1-git-send-email-stefanha@redhat.com> References: <1432285330-13994-1-git-send-email-stefanha@redhat.com> Subject: [Qemu-devel] [PULL 22/38] block/parallels: improve image reading performance List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: qemu-devel@nongnu.org Cc: Kevin Wolf , Peter Maydell , Roman Kagan , Stefan Hajnoczi , "Denis V. Lunev" From: "Denis V. Lunev" Try to perform IO for the biggest continuous block possible. The performance for sequential read is increased from 220 Mb/sec to 360 Mb/sec for continuous image on my SSD HDD. Signed-off-by: Denis V. 
Lunev Reviewed-by: Roman Kagan Reviewed-by: Stefan Hajnoczi Signed-off-by: Roman Kagan Message-id: 1430207220-24458-23-git-send-email-den@openvz.org CC: Kevin Wolf Signed-off-by: Stefan Hajnoczi --- block/parallels.c | 36 +++++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/block/parallels.c b/block/parallels.c index 76e3a4e..5ff74e8 100644 --- a/block/parallels.c +++ b/block/parallels.c @@ -103,6 +103,35 @@ static int cluster_remainder(BDRVParallelsState *s, int64_t sector_num, return MIN(nb_sectors, ret); } +static int64_t block_status(BDRVParallelsState *s, int64_t sector_num, + int nb_sectors, int *pnum) +{ + int64_t start_off = -2, prev_end_off = -2; + + *pnum = 0; + while (nb_sectors > 0 || start_off == -2) { + int64_t offset = seek_to_sector(s, sector_num); + int to_end; + + if (start_off == -2) { + start_off = offset; + prev_end_off = offset; + } else if (offset != prev_end_off) { + break; + } + + to_end = cluster_remainder(s, sector_num, nb_sectors); + nb_sectors -= to_end; + sector_num += to_end; + *pnum += to_end; + + if (offset > 0) { + prev_end_off += to_end; + } + } + return start_off; +} + static int64_t allocate_cluster(BlockDriverState *bs, int64_t sector_num) { BDRVParallelsState *s = bs->opaque; @@ -148,11 +177,9 @@ static int64_t coroutine_fn parallels_co_get_block_status(BlockDriverState *bs, int64_t offset; qemu_co_mutex_lock(&s->lock); - offset = seek_to_sector(s, sector_num); + offset = block_status(s, sector_num, nb_sectors, pnum); qemu_co_mutex_unlock(&s->lock); - *pnum = cluster_remainder(s, sector_num, nb_sectors); - if (offset < 0) { return 0; } @@ -218,10 +245,9 @@ static coroutine_fn int parallels_co_readv(BlockDriverState *bs, int n, nbytes; qemu_co_mutex_lock(&s->lock); - position = seek_to_sector(s, sector_num); + position = block_status(s, sector_num, nb_sectors, &n); qemu_co_mutex_unlock(&s->lock); - n = cluster_remainder(s, sector_num, nb_sectors); nbytes = n << BDRV_SECTOR_BITS; if 
(position < 0) { -- 2.1.0