From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from eggs.gnu.org ([2001:4830:134:3::10]:34437) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1YVdrj-0008ON-1c for qemu-devel@nongnu.org; Wed, 11 Mar 2015 06:27:53 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1YVdrf-0003wO-3F for qemu-devel@nongnu.org; Wed, 11 Mar 2015 06:27:50 -0400 Received: from mailhub.sw.ru ([195.214.232.25]:47737 helo=relay.sw.ru) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1YVdre-0003uD-Iq for qemu-devel@nongnu.org; Wed, 11 Mar 2015 06:27:46 -0400 From: "Denis V. Lunev" Date: Wed, 11 Mar 2015 13:28:16 +0300 Message-Id: <1426069701-1405-23-git-send-email-den@openvz.org> In-Reply-To: <1426069701-1405-1-git-send-email-den@openvz.org> References: <1426069701-1405-1-git-send-email-den@openvz.org> Subject: [Qemu-devel] [PATCH 22/27] block/parallels: improve image reading performance List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Cc: Kevin Wolf , "Denis V. Lunev" , qemu-devel@nongnu.org, Stefan Hajnoczi Try to perform IO for the biggest continuous block possible. The performance for sequential read is increased from 220 Gb/sec to 360 Gb/sec for a continuous image on my SSD HDD. Signed-off-by: Denis V. 
Lunev Reviewed-by: Roman Kagan CC: Kevin Wolf CC: Stefan Hajnoczi --- block/parallels.c | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/block/parallels.c b/block/parallels.c index 2605c1a..0f255fe 100644 --- a/block/parallels.c +++ b/block/parallels.c @@ -97,6 +97,35 @@ static int cluster_remainder(BDRVParallelsState *s, int64_t sector_num, return MIN(nb_sectors, ret); } +static int64_t block_status(BDRVParallelsState *s, int64_t sector_num, + int nb_sectors, int *pnum) +{ + int64_t start_off = -2, prev_end_off = -2; + + *pnum = 0; + while (nb_sectors > 0 || start_off == -2) { + int64_t offset = seek_to_sector(s, sector_num); + int to_end; + + if (start_off == -2) { + start_off = offset; + prev_end_off = offset; + } else if (offset != prev_end_off) { + break; + } + + to_end = cluster_remainder(s, sector_num, nb_sectors); + nb_sectors -= to_end; + sector_num += to_end; + *pnum += to_end; + + if (offset > 0) { + prev_end_off += to_end; + } + } + return start_off; +} + static int64_t allocate_cluster(BlockDriverState *bs, int64_t sector_num) { BDRVParallelsState *s = bs->opaque; @@ -134,11 +163,9 @@ static int64_t coroutine_fn parallels_co_get_block_status(BlockDriverState *bs, int64_t offset; qemu_co_mutex_lock(&s->lock); - offset = seek_to_sector(s, sector_num); + offset = block_status(s, sector_num, nb_sectors, pnum); qemu_co_mutex_unlock(&s->lock); - *pnum = cluster_remainder(s, sector_num, nb_sectors); - if (offset < 0) { return 0; } @@ -202,8 +229,8 @@ static coroutine_fn int parallels_co_readv(BlockDriverState *bs, qemu_co_mutex_lock(&s->lock); while (nb_sectors > 0) { - int64_t position = seek_to_sector(s, sector_num); - int n = cluster_remainder(s, sector_num, nb_sectors); + int n; + int64_t position = block_status(s, sector_num, nb_sectors, &n); int nbytes = n << BDRV_SECTOR_BITS; if (position < 0) { -- 1.9.1