From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from eggs.gnu.org ([2001:4830:134:3::10]:51598) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1YVFsR-0007Y5-VK for qemu-devel@nongnu.org; Tue, 10 Mar 2015 04:51:03 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1YVFsN-0007ki-HM for qemu-devel@nongnu.org; Tue, 10 Mar 2015 04:50:59 -0400 Received: from mailhub.sw.ru ([195.214.232.25]:14076 helo=relay.sw.ru) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1YVFsM-0007ir-Oy for qemu-devel@nongnu.org; Tue, 10 Mar 2015 04:50:55 -0400 From: "Denis V. Lunev" Date: Tue, 10 Mar 2015 11:51:16 +0300 Message-Id: <1425977481-13317-23-git-send-email-den@openvz.org> In-Reply-To: <1425977481-13317-1-git-send-email-den@openvz.org> References: <1425977481-13317-1-git-send-email-den@openvz.org> Subject: [Qemu-devel] [PATCH 22/27] block/parallels: improve image reading performance List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Cc: Kevin Wolf , "Denis V. Lunev" , qemu-devel@nongnu.org, Stefan Hajnoczi , Roman Kagan Try to perform IO for the biggest continuous block possible. The performance for sequential read is increased from 220 Gb/sec to 360 Gb/sec for continuous image on my SSD HDD. Signed-off-by: Denis V. 
Lunev CC: Roman Kagan CC: Kevin Wolf CC: Stefan Hajnoczi --- block/parallels.c | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/block/parallels.c b/block/parallels.c index e8b3d09..d0128c9 100644 --- a/block/parallels.c +++ b/block/parallels.c @@ -96,6 +96,35 @@ static int cluster_remainder(BDRVParallelsState *s, int64_t sector_num, return MIN(nb_sectors, ret); } +static int64_t block_status(BDRVParallelsState *s, int64_t sector_num, + int nb_sectors, int *pnum) +{ + int64_t start_off = -2, prev_end_off = -2; + + *pnum = 0; + while (nb_sectors > 0 || start_off == -2) { + int64_t offset = seek_to_sector(s, sector_num); + int to_end; + + if (start_off == -2) { + start_off = offset; + prev_end_off = offset; + } else if (offset != prev_end_off) { + break; + } + + to_end = cluster_remainder(s, sector_num, nb_sectors); + nb_sectors -= to_end; + sector_num += to_end; + *pnum += to_end; + + if (offset > 0) { + prev_end_off += to_end; + } + } + return start_off; +} + static int64_t allocate_cluster(BlockDriverState *bs, int64_t sector_num) { BDRVParallelsState *s = bs->opaque; @@ -133,11 +162,9 @@ static int64_t coroutine_fn parallels_co_get_block_status(BlockDriverState *bs, int64_t offset; qemu_co_mutex_lock(&s->lock); - offset = seek_to_sector(s, sector_num); + offset = block_status(s, sector_num, nb_sectors, pnum); qemu_co_mutex_unlock(&s->lock); - *pnum = cluster_remainder(s, sector_num, nb_sectors); - if (offset < 0) { return 0; } @@ -201,8 +228,8 @@ static coroutine_fn int parallels_co_readv(BlockDriverState *bs, qemu_co_mutex_lock(&s->lock); while (nb_sectors > 0) { - int64_t position = seek_to_sector(s, sector_num); - int n = cluster_remainder(s, sector_num, nb_sectors); + int n; + int64_t position = block_status(s, sector_num, nb_sectors, &n); int nbytes = n << BDRV_SECTOR_BITS; if (position < 0) { -- 1.9.1