Message-ID: <54E3B785.8000007@redhat.com>
Date: Tue, 17 Feb 2015 16:49:57 -0500
From: Max Reitz
Subject: Re: [Qemu-devel] [PATCH] block/vdi: Fix locking for parallel requests
To: Paolo Bonzini , qemu-devel@nongnu.org
Cc: Kevin Wolf , Stefan Weil , qemu-stable , Stefan Hajnoczi
In-Reply-To: <54E3B760.9070607@redhat.com>
References: <1424208819-10306-1-git-send-email-mreitz@redhat.com> <54E3B760.9070607@redhat.com>

On 2015-02-17 at 16:49, Paolo Bonzini wrote:
> Cc: qemu-stable

Right, I forgot that. Thanks!

Max

> On 17/02/2015 22:33, Max Reitz wrote:
>> Concurrently modifying the bmap is not a good idea; this patch adds a
>> lock for it. See https://bugs.launchpad.net/qemu/+bug/1422307 for what
>> can go wrong without.
>>
>> Signed-off-by: Max Reitz
>> ---
>>  block/vdi.c | 23 ++++++++++++++++++++---
>>  1 file changed, 20 insertions(+), 3 deletions(-)
>>
>> diff --git a/block/vdi.c b/block/vdi.c
>> index 74030c6..c5ff428 100644
>> --- a/block/vdi.c
>> +++ b/block/vdi.c
>> @@ -51,6 +51,7 @@
>>
>>  #include "qemu-common.h"
>>  #include "block/block_int.h"
>> +#include "block/coroutine.h"
>>  #include "qemu/module.h"
>>  #include "migration/migration.h"
>>
>> @@ -196,6 +197,8 @@ typedef struct {
>>      /* VDI header (converted to host endianness). */
>>      VdiHeader header;
>>
>> +    CoMutex bmap_lock;
>> +
>>      Error *migration_blocker;
>>  } BDRVVdiState;
>>
>> @@ -498,6 +501,8 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
>>          goto fail_free_bmap;
>>      }
>>
>> +    qemu_co_mutex_init(&s->bmap_lock);
>> +
>>      /* Disable migration when vdi images are used */
>>      error_set(&s->migration_blocker,
>>                QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
>> @@ -619,6 +624,9 @@ static int vdi_co_write(BlockDriverState *bs,
>>                 n_sectors, sector_num);
>>
>>          /* prepare next AIO request */
>> +        if (!block) {
>> +            qemu_co_mutex_lock(&s->bmap_lock);
>> +        }
>>          bmap_entry = le32_to_cpu(s->bmap[block_index]);
>>          if (!VDI_IS_ALLOCATED(bmap_entry)) {
>>              /* Allocate new block and write to it. */
>> @@ -641,9 +649,13 @@ static int vdi_co_write(BlockDriverState *bs,
>>                     (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
>>              ret = bdrv_write(bs->file, offset, block, s->block_sectors);
>>          } else {
>> -            uint64_t offset = s->header.offset_data / SECTOR_SIZE +
>> -                              (uint64_t)bmap_entry * s->block_sectors +
>> -                              sector_in_block;
>> +            uint64_t offset;
>> +
>> +            qemu_co_mutex_unlock(&s->bmap_lock);
>> +
>> +            offset = s->header.offset_data / SECTOR_SIZE +
>> +                     (uint64_t)bmap_entry * s->block_sectors +
>> +                     sector_in_block;
>>              ret = bdrv_write(bs->file, offset, buf, n_sectors);
>>          }
>>
>> @@ -656,6 +668,9 @@ static int vdi_co_write(BlockDriverState *bs,
>>
>>      logout("finished data write\n");
>>      if (ret < 0) {
>> +        if (block) {
>> +            qemu_co_mutex_unlock(&s->bmap_lock);
>> +        }
>>          return ret;
>>      }
>>
>> @@ -688,6 +703,8 @@ static int vdi_co_write(BlockDriverState *bs,
>>          logout("will write %u block map sectors starting from entry %u\n",
>>                 n_sectors, bmap_first);
>>          ret = bdrv_write(bs->file, offset, base, n_sectors);
>> +
>> +        qemu_co_mutex_unlock(&s->bmap_lock);
>>      }
>>
>>      return ret;
>>
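
In case a self-contained illustration helps review: the idea in the patch
is to serialize all reads and updates of s->bmap behind the new CoMutex,
so two coroutines writing into the same still-unallocated block can no
longer both decide to allocate it. Below is a minimal sketch of that
pattern, assuming the coroutine mutex API from block/coroutine.h; the
helper function itself is hypothetical and not part of the patch, which
instead keeps the lock held across the block map write-back rather than
factoring the lookup out.

/* Hypothetical helper, for illustration only: look up a bmap entry and
 * allocate a new data block if the entry is still unallocated.  Every
 * access to s->bmap happens under s->bmap_lock, so concurrent writers
 * cannot race on the same entry (cf. the Launchpad bug cited above). */
static uint32_t coroutine_fn
vdi_bmap_lookup_or_alloc(BDRVVdiState *s, uint32_t block_index)
{
    uint32_t bmap_entry;

    qemu_co_mutex_lock(&s->bmap_lock);
    bmap_entry = le32_to_cpu(s->bmap[block_index]);
    if (!VDI_IS_ALLOCATED(bmap_entry)) {
        /* Publish the new entry while still holding the lock, so no
         * other coroutine can observe or allocate it in between. */
        bmap_entry = s->header.blocks_allocated++;
        s->bmap[block_index] = cpu_to_le32(bmap_entry);
    }
    qemu_co_mutex_unlock(&s->bmap_lock);

    return bmap_entry;
}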