From mboxrd@z Thu Jan 1 00:00:00 1970
From: Paolo Bonzini <pbonzini@redhat.com>
Date: Thu, 26 Jan 2012 18:22:44 +0100
Message-Id: <1327598569-5199-14-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1327598569-5199-1-git-send-email-pbonzini@redhat.com>
References: <1327598569-5199-1-git-send-email-pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH v2 13/18] block: allow waiting at arbitrary granularity
To: qemu-devel@nongnu.org
Cc: kwolf@redhat.com

When emulating small logical block sizes, the only overlaps that matter
are at host block size granularity, not cluster.  Make
wait_for_overlapping_requests more flexible in this respect, too.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 block.c |   43 ++++++++++++++++++++++++++++---------------
 1 files changed, 28 insertions(+), 15 deletions(-)

diff --git a/block.c b/block.c
index 76e1c6a..c78ca47 100644
--- a/block.c
+++ b/block.c
@@ -1166,24 +1166,33 @@ static void tracked_request_begin(BdrvTrackedRequest *req,
 /**
  * Round a region to cluster boundaries
  */
-static void round_to_clusters(BlockDriverState *bs,
-                              int64_t sector_num, int nb_sectors,
-                              int64_t *cluster_sector_num,
-                              int *cluster_nb_sectors)
+static void round_sectors(int64_t alignment,
+                          int64_t sector_num, int nb_sectors,
+                          int64_t *cluster_sector_num,
+                          int *cluster_nb_sectors)
 {
-    BlockDriverInfo bdi;
-
-    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
+    if (alignment == BDRV_SECTOR_SIZE) {
         *cluster_sector_num = sector_num;
         *cluster_nb_sectors = nb_sectors;
     } else {
-        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
+        int64_t c = alignment / BDRV_SECTOR_SIZE;
         *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
         *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                             nb_sectors, c);
     }
 }
 
+static int64_t get_cluster_size(BlockDriverState *bs)
+{
+    BlockDriverInfo bdi;
+
+    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
+        return BDRV_SECTOR_SIZE;
+    } else {
+        return bdi.cluster_size / BDRV_SECTOR_SIZE;
+    }
+}
+
 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                      int64_t sector_num, int nb_sectors) {
     /*        aaaa   bbbb */
@@ -1198,7 +1207,8 @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
 }
 
 static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
-        int64_t sector_num, int nb_sectors, bool writes_only)
+        int64_t sector_num, int nb_sectors,
+        int64_t granularity, bool writes_only)
 {
     BdrvTrackedRequest *req;
     int64_t cluster_sector_num;
@@ -1211,8 +1221,8 @@ static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
      * CoR read and write operations are atomic and guest writes cannot
      * interleave between them.
      */
-    round_to_clusters(bs, sector_num, nb_sectors,
-                      &cluster_sector_num, &cluster_nb_sectors);
+    round_sectors(granularity, sector_num, nb_sectors,
+                  &cluster_sector_num, &cluster_nb_sectors);
 
     if (writes_only && !(bs->open_flags & BDRV_O_RDWR)) {
         return;
@@ -1532,8 +1542,9 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
     /* Cover entire cluster so no additional backing file I/O is required when
      * allocating cluster in the image file.
      */
-    round_to_clusters(bs, sector_num, nb_sectors,
-                      &cluster_sector_num, &cluster_nb_sectors);
+    round_sectors(get_cluster_size(bs),
+                  sector_num, nb_sectors,
+                  &cluster_sector_num, &cluster_nb_sectors);
 
     trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                    cluster_sector_num, cluster_nb_sectors);
@@ -1598,7 +1609,8 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
     }
 
     if (bs->copy_on_read_in_flight) {
-        wait_for_overlapping_requests(bs, sector_num, nb_sectors, false);
+        wait_for_overlapping_requests(bs, sector_num, nb_sectors,
+                                      get_cluster_size(bs), false);
     }
 
     tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
@@ -1672,7 +1684,8 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    }
 
     if (bs->copy_on_read_in_flight) {
-        wait_for_overlapping_requests(bs, sector_num, nb_sectors, false);
+        wait_for_overlapping_requests(bs, sector_num, nb_sectors,
+                                      get_cluster_size(bs), false);
     }
 
     tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
-- 
1.7.7.6
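
[Editor's illustration, not part of the patch: the standalone sketch
below shows the rounding that round_sectors() performs, i.e. how a
request is widened to the requested alignment before overlap checks or
copy-on-read.  BDRV_SECTOR_SIZE and the QEMU_ALIGN_* macros are
re-defined here only so the example compiles on its own; in QEMU they
come from the existing headers, and the file name used to build it is
arbitrary.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_SIZE 512
#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))
#define QEMU_ALIGN_UP(n, m)   QEMU_ALIGN_DOWN((n) + (m) - 1, (m))

/* Same logic as round_sectors() in the patch: expand the range
 * [sector_num, sector_num + nb_sectors) to multiples of 'alignment'
 * bytes, expressed in sectors. */
static void round_sectors(int64_t alignment,
                          int64_t sector_num, int nb_sectors,
                          int64_t *cluster_sector_num,
                          int *cluster_nb_sectors)
{
    if (alignment == BDRV_SECTOR_SIZE) {
        /* Sector granularity: nothing to widen. */
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = alignment / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

int main(void)
{
    int64_t start;
    int count;

    /* A 3-sector request at sector 5, with 2 KiB (4-sector) granularity,
     * is widened to cover the whole block at sectors 4..7. */
    round_sectors(4 * BDRV_SECTOR_SIZE, 5, 3, &start, &count);
    printf("start=%" PRId64 " count=%d\n", start, count);
    return 0;
}

Built with e.g. "gcc -o round_example round_example.c", this prints
"start=4 count=4": the 3-sector request at sector 5 becomes a 4-sector
aligned range, which is what wait_for_overlapping_requests() and the
copy-on-read path operate on after the patch.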