From: Paolo Bonzini <pbonzini@redhat.com>
To: Pavel Dovgalyuk <Pavel.Dovgaluk@ispras.ru>, qemu-devel@nongnu.org
Cc: peter.maydell@linaro.org, peter.crosthwaite@xilinx.com,
mark.burton@greensocs.com, real@ispras.ru, batuzovk@ispras.ru,
maria.klimushenkova@ispras.ru, alex.bennee@linaro.org,
afaerber@suse.de, fred.konrad@greensocs.com
Subject: Re: [Qemu-devel] [RFC PATCH v8 17/21] replay: replay aio requests
Date: Fri, 30 Jan 2015 12:07:04 +0100
Message-ID: <54CB65D8.4030900@redhat.com>
In-Reply-To: <20150122085306.5276.94543.stgit@PASHA-ISP.def.inno>

On 22/01/2015 09:53, Pavel Dovgalyuk wrote:
> This patch adds an identifier to aio requests. The ID is used for creating
> bottom halves and for identifying them while replaying.
> The patch also introduces several functions that make it possible to replay
> aio requests.
>
> Signed-off-by: Pavel Dovgalyuk <pavel.dovgaluk@ispras.ru>

Ah, some obscure parts of patch 16 now become clearer. :)

But it's still not clear what the design is. You need to document it
too, and then I (or others) can start reviewing the patches.
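
For what it's worth, here is how I currently read the intended mechanism,
boiled down to a stand-alone model. Every name below is my own placeholder
rather than the actual QEMU/replay API, so please correct me if this is not
the design you have in mind:

/*
 * Model only: each asynchronous request is stamped with the current
 * "step" (instruction count) when it is submitted; during replay the
 * completion callback is delivered only when the log reaches that same
 * step again, instead of whenever the host I/O happens to finish.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef void completion_fn(void *opaque, int ret);

typedef struct {
    uint64_t step;               /* ID recorded at submission time */
    completion_fn *cb;
    void *opaque;
    bool pending;
} model_request;

static uint64_t current_step;    /* stands in for the icount position */

static void submit(model_request *req, completion_fn *cb, void *opaque)
{
    req->step = current_step;    /* record: tag the request with an ID */
    req->cb = cb;
    req->opaque = opaque;
    req->pending = true;
}

static void replay_step_reached(model_request *req, uint64_t logged_step)
{
    /* replay: fire the completion only at the recorded step */
    if (req->pending && req->step == logged_step) {
        req->pending = false;
        req->cb(req->opaque, 0);
    }
}

static void done(void *opaque, int ret)
{
    printf("%s completed with %d\n", (const char *)opaque, ret);
}

int main(void)
{
    char name[] = "ide-read";
    model_request req = { 0 };

    current_step = 42;
    submit(&req, done, name);
    replay_step_reached(&req, 41);   /* too early: nothing happens */
    replay_step_reached(&req, 42);   /* matches the recorded ID */
    return 0;
}

If that is roughly what the bdrv_co_aio_rw_vector/aio_bh_new_replay changes
are doing, please write it down in the documentation, including how the
step is recorded and how replay decides when the bottom half may run.
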
Paolo
> ---
> block.c | 81 ++++++++++++++++++++++++++++++++++++----
> block/block-backend.c | 30 ++++++++++++++-
> block/qcow2.c | 4 ++
> dma-helpers.c | 6 ++-
> hw/block/virtio-blk.c | 10 ++---
> hw/ide/atapi.c | 10 +++--
> hw/ide/core.c | 14 ++++---
> include/block/block.h | 15 +++++++
> include/qemu-common.h | 2 +
> include/sysemu/block-backend.h | 10 +++++
> qemu-io-cmds.c | 2 -
> stubs/replay.c | 5 ++
> trace-events | 2 +
> util/iov.c | 4 ++
> 14 files changed, 167 insertions(+), 28 deletions(-)
>
> diff --git a/block.c b/block.c
> index a4f45c3..7f6fa8b 100644
> --- a/block.c
> +++ b/block.c
> @@ -83,7 +83,8 @@ static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
> BdrvRequestFlags flags,
> BlockCompletionFunc *cb,
> void *opaque,
> - bool is_write);
> + bool is_write,
> + bool aio_replay);
> static void coroutine_fn bdrv_co_do_rw(void *opaque);
> static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
> int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
> @@ -4425,7 +4426,19 @@ BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
> trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
>
> return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
> - cb, opaque, false);
> + cb, opaque, false, false);
> +}
> +
> +BlockAIOCB *bdrv_aio_readv_replay(BlockDriverState *bs,
> + int64_t sector_num,
> + QEMUIOVector *qiov, int nb_sectors,
> + BlockCompletionFunc *cb,
> + void *opaque)
> +{
> + trace_bdrv_aio_readv_replay(bs, sector_num, nb_sectors, opaque);
> +
> + return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
> + cb, opaque, false, true);
> }
>
> BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
> @@ -4435,7 +4448,19 @@ BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
> trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
>
> return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
> - cb, opaque, true);
> + cb, opaque, true, false);
> +}
> +
> +BlockAIOCB *bdrv_aio_writev_replay(BlockDriverState *bs,
> + int64_t sector_num,
> + QEMUIOVector *qiov, int nb_sectors,
> + BlockCompletionFunc *cb,
> + void *opaque)
> +{
> + trace_bdrv_aio_writev_replay(bs, sector_num, nb_sectors, opaque);
> +
> + return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
> + cb, opaque, true, true);
> }
>
> BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
> @@ -4446,7 +4471,7 @@ BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
>
> return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
> BDRV_REQ_ZERO_WRITE | flags,
> - cb, opaque, true);
> + cb, opaque, true, true);
> }
>
>
> @@ -4593,7 +4618,8 @@ static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
> * requests. However, the fields opaque and error are left unmodified as they
> * are used to signal failure for a single request to the caller.
> */
> -int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
> +int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs,
> + bool replay)
> {
> MultiwriteCB *mcb;
> int i;
> @@ -4631,7 +4657,7 @@ int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
> bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
> reqs[i].nb_sectors, reqs[i].flags,
> multiwrite_cb, mcb,
> - true);
> + true, replay);
> }
>
> return 0;
> @@ -4776,7 +4802,12 @@ static void coroutine_fn bdrv_co_do_rw(void *opaque)
> acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
> }
>
> - acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
> + if (acb->common.replay) {
> + acb->bh = aio_bh_new_replay(bdrv_get_aio_context(bs), bdrv_co_em_bh,
> + acb, acb->common.replay_step);
> + } else {
> + acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
> + }
> qemu_bh_schedule(acb->bh);
> }
>
> @@ -4787,7 +4818,8 @@ static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
> BdrvRequestFlags flags,
> BlockCompletionFunc *cb,
> void *opaque,
> - bool is_write)
> + bool is_write,
> + bool aio_replay)
> {
> Coroutine *co;
> BlockAIOCBCoroutine *acb;
> @@ -4798,6 +4830,11 @@ static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
> acb->req.qiov = qiov;
> acb->req.flags = flags;
> acb->is_write = is_write;
> + acb->done = NULL;
> + acb->common.replay = aio_replay;
> + if (aio_replay) {
> + acb->common.replay_step = replay_get_current_step();
> + }
>
> co = qemu_coroutine_create(bdrv_co_do_rw);
> qemu_coroutine_enter(co, acb);
> @@ -4811,7 +4848,12 @@ static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
> BlockDriverState *bs = acb->common.bs;
>
> acb->req.error = bdrv_co_flush(bs);
> - acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
> + if (acb->common.replay) {
> + acb->bh = aio_bh_new_replay(bdrv_get_aio_context(bs), bdrv_co_em_bh,
> + acb, acb->common.replay_step);
> + } else {
> + acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
> + }
> qemu_bh_schedule(acb->bh);
> }
>
> @@ -4831,6 +4873,25 @@ BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
> return &acb->common;
> }
>
> +BlockAIOCB *bdrv_aio_flush_replay(BlockDriverState *bs,
> + BlockCompletionFunc *cb, void *opaque)
> +{
> + trace_bdrv_aio_flush(bs, opaque);
> +
> + Coroutine *co;
> + BlockAIOCBCoroutine *acb;
> +
> + acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
> + acb->done = NULL;
> + acb->common.replay = true;
> + acb->common.replay_step = replay_get_current_step();
> +
> + co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
> + qemu_coroutine_enter(co, acb);
> +
> + return &acb->common;
> +}
> +
> static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
> {
> BlockAIOCBCoroutine *acb = opaque;
> @@ -4881,6 +4942,8 @@ void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
> acb->cb = cb;
> acb->opaque = opaque;
> acb->refcnt = 1;
> + acb->replay_step = 0;
> + acb->replay = false;
> return acb;
> }
>
> diff --git a/block/block-backend.c b/block/block-backend.c
> index d00c129..451041a 100644
> --- a/block/block-backend.c
> +++ b/block/block-backend.c
> @@ -447,6 +447,14 @@ BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
> return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
> }
>
> +BlockAIOCB *blk_aio_readv_replay(BlockBackend *blk, int64_t sector_num,
> + QEMUIOVector *iov, int nb_sectors,
> + BlockCompletionFunc *cb, void *opaque)
> +{
> + return bdrv_aio_readv_replay(blk->bs, sector_num, iov, nb_sectors,
> + cb, opaque);
> +}
> +
> BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
> QEMUIOVector *iov, int nb_sectors,
> BlockCompletionFunc *cb, void *opaque)
> @@ -454,12 +462,26 @@ BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
> return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
> }
>
> +BlockAIOCB *blk_aio_writev_replay(BlockBackend *blk, int64_t sector_num,
> + QEMUIOVector *iov, int nb_sectors,
> + BlockCompletionFunc *cb, void *opaque)
> +{
> + return bdrv_aio_writev_replay(blk->bs, sector_num, iov, nb_sectors,
> + cb, opaque);
> +}
> +
> BlockAIOCB *blk_aio_flush(BlockBackend *blk,
> BlockCompletionFunc *cb, void *opaque)
> {
> return bdrv_aio_flush(blk->bs, cb, opaque);
> }
>
> +BlockAIOCB *blk_aio_flush_replay(BlockBackend *blk,
> + BlockCompletionFunc *cb, void *opaque)
> +{
> + return bdrv_aio_flush_replay(blk->bs, cb, opaque);
> +}
> +
> BlockAIOCB *blk_aio_discard(BlockBackend *blk,
> int64_t sector_num, int nb_sectors,
> BlockCompletionFunc *cb, void *opaque)
> @@ -479,7 +501,13 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
>
> int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
> {
> - return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
> + return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs, false);
> +}
> +
> +int blk_aio_multiwrite_replay(BlockBackend *blk, BlockRequest *reqs,
> + int num_reqs)
> +{
> + return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs, true);
> }
>
> int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
> diff --git a/block/qcow2.c b/block/qcow2.c
> index e4e690a..4a2f6ae 100644
> --- a/block/qcow2.c
> +++ b/block/qcow2.c
> @@ -1140,6 +1140,8 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
> uint8_t *cluster_data = NULL;
>
> qemu_iovec_init(&hd_qiov, qiov->niov);
> + hd_qiov.replay = qiov->replay;
> + hd_qiov.replay_step = qiov->replay_step;
>
> qemu_co_mutex_lock(&s->lock);
>
> @@ -1297,6 +1299,8 @@ static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
> remaining_sectors);
>
> qemu_iovec_init(&hd_qiov, qiov->niov);
> + hd_qiov.replay = qiov->replay;
> + hd_qiov.replay_step = qiov->replay_step;
>
> s->cluster_cache_offset = -1; /* disable compressed cache */
>
> diff --git a/dma-helpers.c b/dma-helpers.c
> index 357d7e9..4faf6d3 100644
> --- a/dma-helpers.c
> +++ b/dma-helpers.c
> @@ -212,6 +212,8 @@ BlockAIOCB *dma_blk_io(
> dbs->io_func = io_func;
> dbs->bh = NULL;
> qemu_iovec_init(&dbs->iov, sg->nsg);
> + dbs->iov.replay = true;
> + dbs->iov.replay_step = replay_get_current_step();
> dma_blk_cb(dbs, 0);
> return &dbs->common;
> }
> @@ -221,7 +223,7 @@ BlockAIOCB *dma_blk_read(BlockBackend *blk,
> QEMUSGList *sg, uint64_t sector,
> void (*cb)(void *opaque, int ret), void *opaque)
> {
> - return dma_blk_io(blk, sg, sector, blk_aio_readv, cb, opaque,
> + return dma_blk_io(blk, sg, sector, blk_aio_readv_replay, cb, opaque,
> DMA_DIRECTION_FROM_DEVICE);
> }
>
> @@ -229,7 +231,7 @@ BlockAIOCB *dma_blk_write(BlockBackend *blk,
> QEMUSGList *sg, uint64_t sector,
> void (*cb)(void *opaque, int ret), void *opaque)
> {
> - return dma_blk_io(blk, sg, sector, blk_aio_writev, cb, opaque,
> + return dma_blk_io(blk, sg, sector, blk_aio_writev_replay, cb, opaque,
> DMA_DIRECTION_TO_DEVICE);
> }
>
> diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
> index b19b102..2413cbe 100644
> --- a/hw/block/virtio-blk.c
> +++ b/hw/block/virtio-blk.c
> @@ -265,7 +265,7 @@ void virtio_submit_multiwrite(BlockBackend *blk, MultiReqBuffer *mrb)
> return;
> }
>
> - ret = blk_aio_multiwrite(blk, mrb->blkreq, mrb->num_writes);
> + ret = blk_aio_multiwrite_replay(blk, mrb->blkreq, mrb->num_writes);
> if (ret != 0) {
> for (i = 0; i < mrb->num_writes; i++) {
> if (mrb->blkreq[i].error) {
> @@ -286,7 +286,7 @@ static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
> * Make sure all outstanding writes are posted to the backing device.
> */
> virtio_submit_multiwrite(req->dev->blk, mrb);
> - blk_aio_flush(req->dev->blk, virtio_blk_flush_complete, req);
> + blk_aio_flush_replay(req->dev->blk, virtio_blk_flush_complete, req);
> }
>
> static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
> @@ -357,9 +357,9 @@ static void virtio_blk_handle_read(VirtIOBlockReq *req)
>
> block_acct_start(blk_get_stats(req->dev->blk), &req->acct, req->qiov.size,
> BLOCK_ACCT_READ);
> - blk_aio_readv(req->dev->blk, sector, &req->qiov,
> - req->qiov.size / BDRV_SECTOR_SIZE,
> - virtio_blk_rw_complete, req);
> + blk_aio_readv_replay(req->dev->blk, sector, &req->qiov,
> + req->qiov.size / BDRV_SECTOR_SIZE,
> + virtio_blk_rw_complete, req);
> }
>
> void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
> diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
> index a71e6e0..a69b620 100644
> --- a/hw/ide/atapi.c
> +++ b/hw/ide/atapi.c
> @@ -25,7 +25,7 @@
>
> #include "hw/ide/internal.h"
> #include "hw/scsi/scsi.h"
> -#include "sysemu/block-backend.h"
> +#include "replay/replay.h"
>
> static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret);
>
> @@ -350,10 +350,12 @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
> s->bus->dma->iov.iov_base = (void *)(s->io_buffer + data_offset);
> s->bus->dma->iov.iov_len = n * 4 * 512;
> qemu_iovec_init_external(&s->bus->dma->qiov, &s->bus->dma->iov, 1);
> + s->bus->dma->qiov.replay = true;
> + s->bus->dma->qiov.replay_step = replay_get_current_step();
>
> - s->bus->dma->aiocb = blk_aio_readv(s->blk, (int64_t)s->lba << 2,
> - &s->bus->dma->qiov, n * 4,
> - ide_atapi_cmd_read_dma_cb, s);
> + s->bus->dma->aiocb = blk_aio_readv_replay(s->blk, (int64_t)s->lba << 2,
> + &s->bus->dma->qiov, n * 4,
> + ide_atapi_cmd_read_dma_cb, s);
> return;
>
> eot:
> diff --git a/hw/ide/core.c b/hw/ide/core.c
> index d76244a..1fff193 100644
> --- a/hw/ide/core.c
> +++ b/hw/ide/core.c
> @@ -630,11 +630,13 @@ void ide_sector_read(IDEState *s)
> s->iov.iov_base = s->io_buffer;
> s->iov.iov_len = n * BDRV_SECTOR_SIZE;
> qemu_iovec_init_external(&s->qiov, &s->iov, 1);
> + s->qiov.replay = true;
> + s->qiov.replay_step = replay_get_current_step();
>
> block_acct_start(blk_get_stats(s->blk), &s->acct,
> n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
> - s->pio_aiocb = blk_aio_readv(s->blk, sector_num, &s->qiov, n,
> - ide_sector_read_cb, s);
> + s->pio_aiocb = blk_aio_readv_replay(s->blk, sector_num, &s->qiov, n,
> + ide_sector_read_cb, s);
> }
>
> static void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
> @@ -888,11 +890,13 @@ void ide_sector_write(IDEState *s)
> s->iov.iov_base = s->io_buffer;
> s->iov.iov_len = n * BDRV_SECTOR_SIZE;
> qemu_iovec_init_external(&s->qiov, &s->iov, 1);
> + s->qiov.replay = true;
> + s->qiov.replay_step = replay_get_current_step();
>
> block_acct_start(blk_get_stats(s->blk), &s->acct,
> n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
> - s->pio_aiocb = blk_aio_writev(s->blk, sector_num, &s->qiov, n,
> - ide_sector_write_cb, s);
> + s->pio_aiocb = blk_aio_writev_replay(s->blk, sector_num, &s->qiov, n,
> + ide_sector_write_cb, s);
> }
>
> static void ide_flush_cb(void *opaque, int ret)
> @@ -928,7 +932,7 @@ void ide_flush_cache(IDEState *s)
>
> s->status |= BUSY_STAT;
> block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
> - s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
> + s->pio_aiocb = blk_aio_flush_replay(s->blk, ide_flush_cb, s);
> }
>
> static void ide_cfata_metadata_inquiry(IDEState *s)
> diff --git a/include/block/block.h b/include/block/block.h
> index 3082d2b..06ce097 100644
> --- a/include/block/block.h
> +++ b/include/block/block.h
> @@ -293,11 +293,24 @@ typedef void BlockDriverDirtyHandler(BlockDriverState *bs, int64_t sector,
> BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
> QEMUIOVector *iov, int nb_sectors,
> BlockCompletionFunc *cb, void *opaque);
> +BlockAIOCB *bdrv_aio_readv_replay(BlockDriverState *bs,
> + int64_t sector_num,
> + QEMUIOVector *iov, int nb_sectors,
> + BlockCompletionFunc *cb,
> + void *opaque);
> BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
> QEMUIOVector *iov, int nb_sectors,
> BlockCompletionFunc *cb, void *opaque);
> +BlockAIOCB *bdrv_aio_writev_replay(BlockDriverState *bs,
> + int64_t sector_num,
> + QEMUIOVector *iov, int nb_sectors,
> + BlockCompletionFunc *cb,
> + void *opaque);
> BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
> BlockCompletionFunc *cb, void *opaque);
> +BlockAIOCB *bdrv_aio_flush_replay(BlockDriverState *bs,
> + BlockCompletionFunc *cb,
> + void *opaque);
> BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
> int64_t sector_num, int nb_sectors,
> BlockCompletionFunc *cb, void *opaque);
> @@ -318,7 +331,7 @@ typedef struct BlockRequest {
> } BlockRequest;
>
> int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs,
> - int num_reqs);
> + int num_reqs, bool replay);
>
> /* sg packet commands */
> int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf);
> diff --git a/include/qemu-common.h b/include/qemu-common.h
> index 8c9957e..016556a 100644
> --- a/include/qemu-common.h
> +++ b/include/qemu-common.h
> @@ -320,6 +320,8 @@ typedef struct QEMUIOVector {
> int niov;
> int nalloc;
> size_t size;
> + bool replay;
> + uint64_t replay_step;
> } QEMUIOVector;
>
> void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint);
> diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h
> index 8871a02..c34855e 100644
> --- a/include/sysemu/block-backend.h
> +++ b/include/sysemu/block-backend.h
> @@ -94,17 +94,27 @@ void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);
> BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
> QEMUIOVector *iov, int nb_sectors,
> BlockCompletionFunc *cb, void *opaque);
> +BlockAIOCB *blk_aio_readv_replay(BlockBackend *blk, int64_t sector_num,
> + QEMUIOVector *iov, int nb_sectors,
> + BlockCompletionFunc *cb, void *opaque);
> BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
> QEMUIOVector *iov, int nb_sectors,
> BlockCompletionFunc *cb, void *opaque);
> +BlockAIOCB *blk_aio_writev_replay(BlockBackend *blk, int64_t sector_num,
> + QEMUIOVector *iov, int nb_sectors,
> + BlockCompletionFunc *cb, void *opaque);
> BlockAIOCB *blk_aio_flush(BlockBackend *blk,
> BlockCompletionFunc *cb, void *opaque);
> +BlockAIOCB *blk_aio_flush_replay(BlockBackend *blk,
> + BlockCompletionFunc *cb, void *opaque);
> BlockAIOCB *blk_aio_discard(BlockBackend *blk,
> int64_t sector_num, int nb_sectors,
> BlockCompletionFunc *cb, void *opaque);
> void blk_aio_cancel(BlockAIOCB *acb);
> void blk_aio_cancel_async(BlockAIOCB *acb);
> int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs);
> +int blk_aio_multiwrite_replay(BlockBackend *blk, BlockRequest *reqs,
> + int num_reqs);
> int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
> BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
> BlockCompletionFunc *cb, void *opaque);
> diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c
> index e708552..a7d8d4e 100644
> --- a/qemu-io-cmds.c
> +++ b/qemu-io-cmds.c
> @@ -583,7 +583,7 @@ static int do_aio_multiwrite(BlockDriverState *bs, BlockRequest* reqs,
> *total += reqs[i].qiov->size;
> }
>
> - ret = bdrv_aio_multiwrite(bs, reqs, num_reqs);
> + ret = bdrv_aio_multiwrite(bs, reqs, num_reqs, false);
> if (ret < 0) {
> return ret;
> }
> diff --git a/stubs/replay.c b/stubs/replay.c
> index 268f3e0..95b43f3 100755
> --- a/stubs/replay.c
> +++ b/stubs/replay.c
> @@ -25,3 +25,8 @@ int runstate_is_running(void)
> void replay_add_bh_event(void *bh, uint64_t id)
> {
> }
> +
> +uint64_t replay_get_current_step(void)
> +{
> + return 0;
> +}
> diff --git a/trace-events b/trace-events
> index 4ec81eb..2dd21ef 100644
> --- a/trace-events
> +++ b/trace-events
> @@ -64,7 +64,9 @@ bdrv_aio_multiwrite(void *mcb, int num_callbacks, int num_reqs) "mcb %p num_call
> bdrv_aio_discard(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
> bdrv_aio_flush(void *bs, void *opaque) "bs %p opaque %p"
> bdrv_aio_readv(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
> +bdrv_aio_readv_replay(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
> bdrv_aio_writev(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
> +bdrv_aio_writev_replay(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
> bdrv_aio_write_zeroes(void *bs, int64_t sector_num, int nb_sectors, int flags, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d flags %#x opaque %p"
> bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d"
> bdrv_co_readv(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
> diff --git a/util/iov.c b/util/iov.c
> index 2fb18e6..96517e7 100644
> --- a/util/iov.c
> +++ b/util/iov.c
> @@ -257,6 +257,8 @@ void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint)
> qiov->niov = 0;
> qiov->nalloc = alloc_hint;
> qiov->size = 0;
> + qiov->replay = false;
> + qiov->replay_step = 0;
> }
>
> void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov)
> @@ -267,6 +269,8 @@ void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov)
> qiov->niov = niov;
> qiov->nalloc = -1;
> qiov->size = 0;
> + qiov->replay = false;
> + qiov->replay_step = 0;
> for (i = 0; i < niov; i++)
> qiov->size += iov[i].iov_len;
> }
>
>
>