From: Kevin Wolf <kwolf@redhat.com>
To: qemu-block@nongnu.org
Cc: kwolf@redhat.com, qemu-devel@nongnu.org
Subject: [Qemu-devel] [PULL 16/31] dma-helpers: change BlockBackend to opaque value in DMAIOFunc
Date: Wed, 25 May 2016 19:39:41 +0200 [thread overview]
Message-ID: <1464197996-4581-17-git-send-email-kwolf@redhat.com> (raw)
In-Reply-To: <1464197996-4581-1-git-send-email-kwolf@redhat.com>

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of dma_blk_io have no way to pass extra data to the DMAIOFunc,
because the original callback and opaque are gone by the time DMAIOFunc
is called. On the other hand, the BlockBackend is usually derived
from the extra data that you would want to pass to the DMAIOFunc (in
the next patch, that will be the SCSIRequest).

So change DMAIOFunc's prototype, decoupling it from blk_aio_preadv's
and blk_aio_pwritev's. The new prototype loses the BlockBackend
and gains an extra opaque value which, in the case of dma_blk_read
and dma_blk_write, is of course used for the BlockBackend.
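
To make the new contract concrete, here is a minimal sketch (illustrative
only, not part of the patch): a caller supplies its own DMAIOFunc and gets
its state back through io_func_opaque. my_read_io_func is a made-up name;
everything else mirrors the dma_blk_read_io_func wrapper added below.

    /* New typedef introduced by this patch: the backend (or any other
     * per-request state) now travels in the trailing opaque argument. */
    typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque);

    /* Caller-provided io_func: dma_blk_io() hands back the io_func_opaque
     * value it was given, here a BlockBackend. */
    static BlockAIOCB *my_read_io_func(int64_t offset, QEMUIOVector *iov,
                                       BlockCompletionFunc *cb, void *cb_opaque,
                                       void *opaque)
    {
        BlockBackend *blk = opaque;
        return blk_aio_preadv(blk, offset, iov, 0, cb, cb_opaque);
    }

    /* Submission then looks like dma_blk_read() after this patch: */
    dma_blk_io(blk_get_aio_context(blk), sg, offset,
               my_read_io_func, blk, cb, opaque,
               DMA_DIRECTION_FROM_DEVICE);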
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 dma-helpers.c        | 48 +++++++++++++++++++++++++++++++++++-------------
 hw/ide/core.c        | 14 ++++++++------
 hw/ide/internal.h    |  6 +++---
 hw/ide/macio.c       |  2 +-
 include/sysemu/dma.h | 12 ++++++------
 5 files changed, 53 insertions(+), 29 deletions(-)

diff --git a/dma-helpers.c b/dma-helpers.c
index af07aab..b521d84 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -70,7 +70,7 @@ void qemu_sglist_destroy(QEMUSGList *qsg)
 
 typedef struct {
     BlockAIOCB common;
-    BlockBackend *blk;
+    AioContext *ctx;
     BlockAIOCB *acb;
     QEMUSGList *sg;
     uint64_t offset;
@@ -80,6 +80,7 @@ typedef struct {
     QEMUIOVector iov;
     QEMUBH *bh;
     DMAIOFunc *io_func;
+    void *io_func_opaque;
 } DMAAIOCB;
 
 static void dma_blk_cb(void *opaque, int ret);
@@ -154,8 +155,7 @@ static void dma_blk_cb(void *opaque, int ret)
 
     if (dbs->iov.size == 0) {
         trace_dma_map_wait(dbs);
-        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
-                             reschedule_dma, dbs);
+        dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs);
         cpu_register_map_client(dbs->bh);
         return;
     }
@@ -164,8 +164,8 @@ static void dma_blk_cb(void *opaque, int ret)
         qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
     }
 
-    dbs->acb = dbs->io_func(dbs->blk, dbs->offset, &dbs->iov, 0,
-                            dma_blk_cb, dbs);
+    dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
+                            dma_blk_cb, dbs, dbs->io_func_opaque);
     assert(dbs->acb);
 }
 
@@ -191,23 +191,25 @@ static const AIOCBInfo dma_aiocb_info = {
     .cancel_async       = dma_aio_cancel,
 };
 
-BlockAIOCB *dma_blk_io(
-    BlockBackend *blk, QEMUSGList *sg, uint64_t offset,
-    DMAIOFunc *io_func, BlockCompletionFunc *cb,
+BlockAIOCB *dma_blk_io(AioContext *ctx,
+    QEMUSGList *sg, uint64_t offset,
+    DMAIOFunc *io_func, void *io_func_opaque,
+    BlockCompletionFunc *cb,
     void *opaque, DMADirection dir)
 {
-    DMAAIOCB *dbs = blk_aio_get(&dma_aiocb_info, blk, cb, opaque);
+    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, NULL, cb, opaque);
 
-    trace_dma_blk_io(dbs, blk, offset, (dir == DMA_DIRECTION_TO_DEVICE));
+    trace_dma_blk_io(dbs, io_func_opaque, offset, (dir == DMA_DIRECTION_TO_DEVICE));
 
     dbs->acb = NULL;
-    dbs->blk = blk;
     dbs->sg = sg;
+    dbs->ctx = ctx;
     dbs->offset = offset;
     dbs->sg_cur_index = 0;
     dbs->sg_cur_byte = 0;
     dbs->dir = dir;
     dbs->io_func = io_func;
+    dbs->io_func_opaque = io_func_opaque;
     dbs->bh = NULL;
     qemu_iovec_init(&dbs->iov, sg->nsg);
     dma_blk_cb(dbs, 0);
@@ -215,19 +217,39 @@ BlockAIOCB *dma_blk_io(
 }
 
+static
+BlockAIOCB *dma_blk_read_io_func(int64_t offset, QEMUIOVector *iov,
+                                 BlockCompletionFunc *cb, void *cb_opaque,
+                                 void *opaque)
+{
+    BlockBackend *blk = opaque;
+    return blk_aio_preadv(blk, offset, iov, 0, cb, cb_opaque);
+}
+
 BlockAIOCB *dma_blk_read(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset,
                          void (*cb)(void *opaque, int ret), void *opaque)
 {
-    return dma_blk_io(blk, sg, offset, blk_aio_preadv, cb, opaque,
+    return dma_blk_io(blk_get_aio_context(blk),
+                      sg, offset, dma_blk_read_io_func, blk, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
 }
 
+static
+BlockAIOCB *dma_blk_write_io_func(int64_t offset, QEMUIOVector *iov,
+                                  BlockCompletionFunc *cb, void *cb_opaque,
+                                  void *opaque)
+{
+    BlockBackend *blk = opaque;
+    return blk_aio_pwritev(blk, offset, iov, 0, cb, cb_opaque);
+}
+
 BlockAIOCB *dma_blk_write(BlockBackend *blk,
                           QEMUSGList *sg, uint64_t offset,
                           void (*cb)(void *opaque, int ret), void *opaque)
 {
-    return dma_blk_io(blk, sg, offset, blk_aio_pwritev, cb, opaque,
+    return dma_blk_io(blk_get_aio_context(blk),
+                      sg, offset, dma_blk_write_io_func, blk, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
 }
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 7326189..029f6b9 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -441,13 +441,14 @@ static void ide_issue_trim_cb(void *opaque, int ret)
     }
 }
 
-BlockAIOCB *ide_issue_trim(BlockBackend *blk,
-        int64_t offset, QEMUIOVector *qiov, BdrvRequestFlags flags,
-        BlockCompletionFunc *cb, void *opaque)
+BlockAIOCB *ide_issue_trim(
+        int64_t offset, QEMUIOVector *qiov,
+        BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
 {
+    BlockBackend *blk = opaque;
     TrimAIOCB *iocb;
 
-    iocb = blk_aio_get(&trim_aiocb_info, blk, cb, opaque);
+    iocb = blk_aio_get(&trim_aiocb_info, blk, cb, cb_opaque);
     iocb->blk = blk;
     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
     iocb->ret = 0;
@@ -871,8 +872,9 @@ static void ide_dma_cb(void *opaque, int ret)
                                            ide_dma_cb, s);
         break;
     case IDE_DMA_TRIM:
-        s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, offset,
-                                        ide_issue_trim, ide_dma_cb, s,
+        s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
+                                        &s->sg, offset,
+                                        ide_issue_trim, s->blk, ide_dma_cb, s,
                                         DMA_DIRECTION_TO_DEVICE);
         break;
     default:
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index ceb9e59..773928a 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -613,9 +613,9 @@ void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
                         EndTransferFunc *end_transfer_func);
 void ide_transfer_stop(IDEState *s);
 void ide_set_inactive(IDEState *s, bool more);
-BlockAIOCB *ide_issue_trim(BlockBackend *blk,
-        int64_t offset, QEMUIOVector *qiov, BdrvRequestFlags flags,
-        BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *ide_issue_trim(
+        int64_t offset, QEMUIOVector *qiov,
+        BlockCompletionFunc *cb, void *cb_opaque, void *opaque);
 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
                                QEMUIOVector *iov, int nb_sectors,
                                BlockCompletionFunc *cb, void *opaque);
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index d7d9c0f..42ad68a 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -230,7 +230,7 @@ static void pmac_dma_trim(BlockBackend *blk,
     s->io_buffer_index += io->len;
     io->len = 0;
 
-    s->bus->dma->aiocb = ide_issue_trim(blk, offset, &io->iov, 0, cb, io);
+    s->bus->dma->aiocb = ide_issue_trim(offset, &io->iov, cb, io, blk);
 }
 
 static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
index f0e0c30..34c8eaf 100644
--- a/include/sysemu/dma.h
+++ b/include/sysemu/dma.h
@@ -194,14 +194,14 @@ void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
 void qemu_sglist_destroy(QEMUSGList *qsg);
 #endif
 
-typedef BlockAIOCB *DMAIOFunc(BlockBackend *blk, int64_t offset,
-                              QEMUIOVector *iov, BdrvRequestFlags flags,
-                              BlockCompletionFunc *cb, void *opaque);
+typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
+                              BlockCompletionFunc *cb, void *cb_opaque,
+                              void *opaque);
 
-BlockAIOCB *dma_blk_io(BlockBackend *blk,
+BlockAIOCB *dma_blk_io(AioContext *ctx,
                        QEMUSGList *sg, uint64_t offset,
-                       DMAIOFunc *io_func, BlockCompletionFunc *cb,
-                       void *opaque, DMADirection dir);
+                       DMAIOFunc *io_func, void *io_func_opaque,
+                       BlockCompletionFunc *cb, void *opaque, DMADirection dir);
 BlockAIOCB *dma_blk_read(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset,
                          BlockCompletionFunc *cb, void *opaque);
--
1.8.3.1
Thread overview: 34+ messages
2016-05-25 17:39 [Qemu-devel] [PULL 00/31] Block layer patches Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 01/31] block: Fix bdrv_next() memory leak Kevin Wolf
2016-06-06 8:41 ` Paolo Bonzini
2016-05-25 17:39 ` [Qemu-devel] [PULL 02/31] block: Drop useless bdrv_new() call Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 03/31] block: Let bdrv_open_inherit() return the snapshot Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 04/31] tests: Drop BDS from test-throttle.c Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 05/31] block: Drop blk_new_with_bs() Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 06/31] block: Drop bdrv_new_root() Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 07/31] block: Make bdrv_open() return a BDS Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 08/31] block: Assert !bs->refcnt in bdrv_close() Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 09/31] block: Drop bdrv_parent_cb_...() from bdrv_close() Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 10/31] block: Drop errp parameter from blk_new() Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 11/31] block: Introduce bdrv_replace_child() Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 12/31] block: Make bdrv_drain() use bdrv_drained_begin/end() Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 13/31] block: Fix reconfiguring graph with drained nodes Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 14/31] block: Propagate .drained_begin/end callbacks Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 15/31] dma-helpers: change interface to byte-based Kevin Wolf
2016-05-25 17:39 ` Kevin Wolf [this message]
2016-05-25 17:39 ` [Qemu-devel] [PULL 17/31] block: Rename blk_write_zeroes() Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 18/31] block: keep a list of block jobs Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 19/31] block: Cancel jobs first in bdrv_close_all() Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 20/31] block: Default to enabled write cache in blk_new() Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 21/31] block: Convert block job core to BlockBackend Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 22/31] block: Make blk_co_preadv/pwritev() public Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 23/31] stream: Use BlockBackend for I/O Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 24/31] mirror: Allow target that already has a BlockBackend Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 25/31] mirror: Use BlockBackend for I/O Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 26/31] backup: Don't leak BackupBlockJob in error path Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 27/31] backup: Pack Notifier within BackupBlockJob Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 28/31] backup: Remove bs parameter from backup_do_cow() Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 29/31] backup: Use BlockBackend for I/O Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 30/31] commit: " Kevin Wolf
2016-05-25 17:39 ` [Qemu-devel] [PULL 31/31] blockjob: Remove BlockJob.bs Kevin Wolf
2016-05-26 14:06 ` [Qemu-devel] [PULL 00/31] Block layer patches Peter Maydell