From mboxrd@z Thu Jan 1 00:00:00 1970
From: Paolo Bonzini <pbonzini@redhat.com>
Date: Wed, 7 Sep 2011 17:21:00 +0200
Message-Id: <1315408862-15178-4-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1315408862-15178-1-git-send-email-pbonzini@redhat.com>
References: <1315408862-15178-1-git-send-email-pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH 3/5] dma-helpers: rewrite completion/cancellation
To: qemu-devel@nongnu.org

This fixes various problems with completion/cancellation:

* if DMA encounters a bounce buffer conflict, and the DMA operation is
  canceled before the bottom half fires, bad things happen;

* memory is not unmapped after cancellation, again causing problems
  when doing DMA to I/O areas;

* cancellation could leak the iovec, and probably more that I've missed.

The patch fixes them by sharing the cleanup code between completion and
cancellation.  dma_bdrv_cb now returns a boolean completed/not-completed
flag, and the wrapper dma_continue takes care of the tasks to be done
upon completion.

Most of these are basically impossible to hit in practice, but it is
better to be tidy...
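As an aside for reviewers, here is a minimal standalone sketch of the idea
the patch applies (not QEMU code; the FakeAIO type and the fake_* names are
made up for illustration): the normal completion path and the cancellation
path both funnel into one cleanup helper, so the unmap/destroy/release steps
run exactly once no matter how the request ends.

/* Illustrative only -- simplified model, not the QEMU API. */
#include <stdio.h>
#include <stdlib.h>

typedef struct FakeAIO {
    void (*cb)(void *opaque, int ret);   /* user completion callback      */
    void *opaque;
    void *mapped_buf;                    /* stands in for mapped memory   */
} FakeAIO;

/* Shared cleanup, analogous to dma_complete() in the patch below: release
 * resources first, then (optionally) notify the user, then free the request. */
static void fake_complete(FakeAIO *aio, int ret)
{
    free(aio->mapped_buf);               /* "unmap" happens on every path */
    aio->mapped_buf = NULL;
    if (aio->cb) {                       /* cb may have been suppressed   */
        aio->cb(aio->opaque, ret);
    }
    free(aio);
}

/* Normal completion path. */
static void fake_io_done(FakeAIO *aio, int ret)
{
    fake_complete(aio, ret);
}

/* Cancellation path: suppress the callback but reuse the same cleanup. */
static void fake_cancel(FakeAIO *aio)
{
    aio->cb = NULL;
    fake_complete(aio, 0);
}

static void user_cb(void *opaque, int ret)
{
    printf("request finished, ret=%d\n", ret);
}

int main(void)
{
    FakeAIO *aio = calloc(1, sizeof(*aio));
    aio->cb = user_cb;
    aio->opaque = NULL;
    aio->mapped_buf = malloc(512);

    fake_io_done(aio, 0);                /* or: fake_cancel(aio);         */
    return 0;
}

In the real patch, the "suppress the callback" step corresponds to the
dbs->common.cb = NULL assignments in dma_aio_cancel() and in the !dbs->acb
error path before calling dma_complete().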
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 dma-helpers.c |   37 ++++++++++++++++++++++++-------------
 1 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/dma-helpers.c b/dma-helpers.c
index ca97e14..cc8c4e3 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -58,7 +58,7 @@ static void reschedule_dma(void *opaque)
 
     qemu_bh_delete(dbs->bh);
     dbs->bh = NULL;
-    dma_bdrv_cb(opaque, 0);
+    dma_bdrv_cb(dbs, 0);
 }
 
 static void continue_after_map_failure(void *opaque)
@@ -78,6 +78,21 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs)
                                   dbs->iov.iov[i].iov_len, !dbs->to_dev,
                                   dbs->iov.iov[i].iov_len);
     }
+    qemu_iovec_reset(&dbs->iov);
+}
+
+static void dma_complete(DMAAIOCB *dbs, int ret)
+{
+    dma_bdrv_unmap(dbs);
+    if (dbs->common.cb) {
+        dbs->common.cb(dbs->common.opaque, ret);
+    }
+    qemu_iovec_destroy(&dbs->iov);
+    if (dbs->bh) {
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
+    qemu_aio_release(dbs);
 }
 
 static void dma_bdrv_cb(void *opaque, int ret)
@@ -89,12 +104,9 @@ static void dma_bdrv_cb(void *opaque, int ret)
     dbs->acb = NULL;
     dbs->sector_num += dbs->iov.size / 512;
     dma_bdrv_unmap(dbs);
-    qemu_iovec_reset(&dbs->iov);
 
     if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
-        dbs->common.cb(dbs->common.opaque, ret);
-        qemu_iovec_destroy(&dbs->iov);
-        qemu_aio_release(dbs);
+        dma_complete(dbs, ret);
         return;
     }
 
@@ -120,9 +132,8 @@ static void dma_bdrv_cb(void *opaque, int ret)
     dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                             dbs->iov.size / 512, dma_bdrv_cb, dbs);
     if (!dbs->acb) {
-        dma_bdrv_unmap(dbs);
-        qemu_iovec_destroy(&dbs->iov);
-        return;
+        dbs->common.cb = NULL;
+        dma_complete(dbs, -ENOMEM);
     }
 }
 
@@ -131,8 +142,12 @@ static void dma_aio_cancel(BlockDriverAIOCB *acb)
     DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
 
     if (dbs->acb) {
-        bdrv_aio_cancel(dbs->acb);
+        BlockDriverAIOCB *acb = dbs->acb;
+        dbs->acb = NULL;
+        bdrv_aio_cancel(acb);
     }
+    dbs->common.cb = NULL;
+    dma_complete(dbs, 0);
 }
 
 static AIOPool dma_aio_pool = {
@@ -158,10 +173,6 @@ BlockDriverAIOCB *dma_bdrv_io(
     dbs->bh = NULL;
     qemu_iovec_init(&dbs->iov, sg->nsg);
     dma_bdrv_cb(dbs, 0);
-    if (!dbs->acb) {
-        qemu_aio_release(dbs);
-        return NULL;
-    }
     return &dbs->common;
 }
-- 
1.7.6