* [Qemu-devel] [PATCH v2 1/5] block: directly invoke .bdrv_* from emulation functions
From: Stefan Hajnoczi @ 2011-10-13 12:08 UTC
To: qemu-devel; +Cc: Kevin Wolf, Stefan Hajnoczi
The emulation functions that supply default BlockDriver .bdrv_*()
implementations on top of another, already-implemented .bdrv_*() function
should not use the public bdrv_*() interfaces. This patch makes them
invoke the driver's .bdrv_*() functions directly, avoiding an extra layer
of coroutine request processing and a possible infinite loop.
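To illustrate the hazard (this sketch is not part of the patch), consider a
driver that implements only .bdrv_aio_readv, so the sync emulation function
serves as its .bdrv_read:

    /* Illustration only: the call cycle that direct invocation avoids.
     *
     *   bdrv_read()               public sync interface
     *     -> bdrv_read_em()       sync-over-aio emulation
     *       -> bdrv_aio_readv()   public aio interface: re-enters request
     *                             processing, which dispatches back to the
     *                             emulation layer
     *         -> bdrv_read_em()   ... and so on, forever
     *
     * Calling bs->drv->bdrv_aio_readv() from bdrv_read_em() goes straight
     * to the driver and breaks the cycle.
     */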
Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
---
block.c | 14 ++++++++------
1 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/block.c b/block.c
index 62903dd..f4731ec 100644
--- a/block.c
+++ b/block.c
@@ -2759,9 +2759,9 @@ static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,

     if (is_write) {
         qemu_iovec_to_buffer(acb->qiov, acb->bounce);
-        acb->ret = bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
+        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
     } else {
-        acb->ret = bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
+        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
     }

     qemu_bh_schedule(acb->bh);
@@ -2926,8 +2926,9 @@ static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
     iov.iov_base = (void *)buf;
     iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
     qemu_iovec_init_external(&qiov, &iov, 1);
-    acb = bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,
-                         bdrv_rw_em_cb, &async_ret);
+
+    acb = bs->drv->bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,
+                                  bdrv_rw_em_cb, &async_ret);
     if (acb == NULL) {
         async_ret = -1;
         goto fail;
@@ -2954,8 +2955,9 @@ static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
     iov.iov_base = (void *)buf;
     iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
     qemu_iovec_init_external(&qiov, &iov, 1);
-    acb = bdrv_aio_writev(bs, sector_num, &qiov, nb_sectors,
-                          bdrv_rw_em_cb, &async_ret);
+
+    acb = bs->drv->bdrv_aio_writev(bs, sector_num, &qiov, nb_sectors,
+                                   bdrv_rw_em_cb, &async_ret);
     if (acb == NULL) {
         async_ret = -1;
         goto fail;
--
1.7.6.3
* [Qemu-devel] [PATCH v2 2/5] block: switch bdrv_read()/bdrv_write() to coroutines
From: Stefan Hajnoczi @ 2011-10-13 12:08 UTC
To: qemu-devel; +Cc: Kevin Wolf, Stefan Hajnoczi
The bdrv_read()/bdrv_write() functions call .bdrv_read()/.bdrv_write()
directly. They should go through bdrv_co_do_readv() and bdrv_co_do_writev()
instead, in order to unify request processing code across the sync, aio, and
coroutine interfaces. This is also an important step towards removing the
BlockDriver .bdrv_read()/.bdrv_write() interfaces in the future.
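The synchronous path now follows the usual sync-over-coroutine pattern,
condensed here from the diff below: run the request in a coroutine, then pump
the event loop until the coroutine signals completion through a sentinel
value:

    /* Condensed from bdrv_rw_co() below; NOT_DONE marks a request still
     * in flight. */
    RwCo rwco = { .bs = bs, .sector_num = sector_num,
                  .nb_sectors = nb_sectors, .qiov = &qiov,
                  .is_write = is_write, .ret = NOT_DONE };

    if (qemu_in_coroutine()) {
        bdrv_rw_co_entry(&rwco);    /* fast path: already in a coroutine */
    } else {
        Coroutine *co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();        /* let aio completions drive the coroutine */
        }
    }
    return rwco.ret;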
Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
---
block.c | 112 +++++++++++++++++++++++++++++++++++----------------------------
1 files changed, 62 insertions(+), 50 deletions(-)
diff --git a/block.c b/block.c
index f4731ec..ae8fc80 100644
--- a/block.c
+++ b/block.c
@@ -44,6 +44,8 @@
 #include <windows.h>
 #endif

+#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
+
 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
@@ -74,6 +76,8 @@ static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
 static int coroutine_fn bdrv_co_flush_em(BlockDriverState *bs);
 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);

 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
     QTAILQ_HEAD_INITIALIZER(bdrv_states);
@@ -1042,30 +1046,69 @@ static inline bool bdrv_has_async_flush(BlockDriver *drv)
     return drv->bdrv_aio_flush != bdrv_aio_flush_em;
 }

-/* return < 0 if error. See bdrv_write() for the return codes */
-int bdrv_read(BlockDriverState *bs, int64_t sector_num,
-              uint8_t *buf, int nb_sectors)
+typedef struct RwCo {
+    BlockDriverState *bs;
+    int64_t sector_num;
+    int nb_sectors;
+    QEMUIOVector *qiov;
+    bool is_write;
+    int ret;
+} RwCo;
+
+static void coroutine_fn bdrv_rw_co_entry(void *opaque)
 {
-    BlockDriver *drv = bs->drv;
+    RwCo *rwco = opaque;

-    if (!drv)
-        return -ENOMEDIUM;
+    if (!rwco->is_write) {
+        rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
+                                     rwco->nb_sectors, rwco->qiov);
+    } else {
+        rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
+                                      rwco->nb_sectors, rwco->qiov);
+    }
+}

-    if (bdrv_has_async_rw(drv) && qemu_in_coroutine()) {
-        QEMUIOVector qiov;
-        struct iovec iov = {
-            .iov_base = (void *)buf,
-            .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
-        };
+/*
+ * Process a synchronous request using coroutines
+ */
+static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
+                      int nb_sectors, bool is_write)
+{
+    QEMUIOVector qiov;
+    struct iovec iov = {
+        .iov_base = (void *)buf,
+        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
+    };
+    Coroutine *co;
+    RwCo rwco = {
+        .bs = bs,
+        .sector_num = sector_num,
+        .nb_sectors = nb_sectors,
+        .qiov = &qiov,
+        .is_write = is_write,
+        .ret = NOT_DONE,
+    };

-        qemu_iovec_init_external(&qiov, &iov, 1);
-        return bdrv_co_readv(bs, sector_num, nb_sectors, &qiov);
-    }
+    qemu_iovec_init_external(&qiov, &iov, 1);

-    if (bdrv_check_request(bs, sector_num, nb_sectors))
-        return -EIO;
+    if (qemu_in_coroutine()) {
+        /* Fast-path if already in coroutine context */
+        bdrv_rw_co_entry(&rwco);
+    } else {
+        co = qemu_coroutine_create(bdrv_rw_co_entry);
+        qemu_coroutine_enter(co, &rwco);
+        while (rwco.ret == NOT_DONE) {
+            qemu_aio_wait();
+        }
+    }
+    return rwco.ret;
+}

-    return drv->bdrv_read(bs, sector_num, buf, nb_sectors);
+/* return < 0 if error. See bdrv_write() for the return codes */
+int bdrv_read(BlockDriverState *bs, int64_t sector_num,
+              uint8_t *buf, int nb_sectors)
+{
+    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
 }

 static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
@@ -1105,36 +1148,7 @@ static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
                const uint8_t *buf, int nb_sectors)
 {
-    BlockDriver *drv = bs->drv;
-
-    if (!bs->drv)
-        return -ENOMEDIUM;
-
-    if (bdrv_has_async_rw(drv) && qemu_in_coroutine()) {
-        QEMUIOVector qiov;
-        struct iovec iov = {
-            .iov_base = (void *)buf,
-            .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
-        };
-
-        qemu_iovec_init_external(&qiov, &iov, 1);
-        return bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);
-    }
-
-    if (bs->read_only)
-        return -EACCES;
-    if (bdrv_check_request(bs, sector_num, nb_sectors))
-        return -EIO;
-
-    if (bs->dirty_bitmap) {
-        set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
-    }
-
-    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
-        bs->wr_highest_sector = sector_num + nb_sectors - 1;
-    }
-
-    return drv->bdrv_write(bs, sector_num, buf, nb_sectors);
+    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
 }

 int bdrv_pread(BlockDriverState *bs, int64_t offset,
@@ -2912,8 +2926,6 @@ static void bdrv_rw_em_cb(void *opaque, int ret)
     *(int *)opaque = ret;
 }

-#define NOT_DONE 0x7fffffff
-
 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
                         uint8_t *buf, int nb_sectors)
 {
--
1.7.6.3
* [Qemu-devel] [PATCH v2 3/5] block: switch bdrv_aio_readv() to coroutines
From: Stefan Hajnoczi @ 2011-10-13 12:08 UTC
To: qemu-devel; +Cc: Kevin Wolf, Stefan Hajnoczi
More sync, aio, and coroutine unification. Make bdrv_aio_readv() go
through coroutine request processing.
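Condensed from the diff below: the aio path now runs the request in a
coroutine and completes the AIOCB from a bottom half, so the caller's
completion callback does not run in coroutine context:

    /* Condensed from bdrv_co_do_rw() below. */
    static void coroutine_fn bdrv_co_do_rw(void *opaque)
    {
        BlockDriverAIOCBCoroutine *acb = opaque;

        /* ... bdrv_co_do_readv() or bdrv_co_do_writev() ... */

        /* Defer completion to a bottom half instead of invoking
         * acb->common.cb directly from the coroutine. */
        acb->bh = qemu_bh_new(bdrv_co_rw_bh, acb);
        qemu_bh_schedule(acb->bh);
    }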
Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
---
block.c | 48 +++++++++++++++++++++++++++++++++++-------------
1 files changed, 35 insertions(+), 13 deletions(-)
diff --git a/block.c b/block.c
index ae8fc80..e94fa61 100644
--- a/block.c
+++ b/block.c
@@ -78,6 +78,15 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
+                                               int64_t sector_num,
+                                               QEMUIOVector *qiov,
+                                               int nb_sectors,
+                                               BlockDriverCompletionFunc *cb,
+                                               void *opaque,
+                                               bool is_write,
+                                               CoroutineEntry *entry);
+static void coroutine_fn bdrv_co_do_rw(void *opaque);

 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
     QTAILQ_HEAD_INITIALIZER(bdrv_states);
@@ -2367,17 +2376,10 @@ BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
 {
-    BlockDriver *drv = bs->drv;
-
     trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

-    if (!drv)
-        return NULL;
-    if (bdrv_check_request(bs, sector_num, nb_sectors))
-        return NULL;
-
-    return drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
-                               cb, opaque);
+    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
+                                 cb, opaque, false, bdrv_co_do_rw);
 }

 typedef struct BlockCompleteData {
@@ -2824,6 +2826,7 @@ static void bdrv_co_rw_bh(void *opaque)
     qemu_aio_release(acb);
 }

+/* Invoke .bdrv_co_readv/.bdrv_co_writev */
 static void coroutine_fn bdrv_co_rw(void *opaque)
 {
     BlockDriverAIOCBCoroutine *acb = opaque;
@@ -2841,13 +2844,32 @@ static void coroutine_fn bdrv_co_rw(void *opaque)
     qemu_bh_schedule(acb->bh);
 }

+/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
+static void coroutine_fn bdrv_co_do_rw(void *opaque)
+{
+    BlockDriverAIOCBCoroutine *acb = opaque;
+    BlockDriverState *bs = acb->common.bs;
+
+    if (!acb->is_write) {
+        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
+            acb->req.nb_sectors, acb->req.qiov);
+    } else {
+        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
+            acb->req.nb_sectors, acb->req.qiov);
+    }
+
+    acb->bh = qemu_bh_new(bdrv_co_rw_bh, acb);
+    qemu_bh_schedule(acb->bh);
+}
+
 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                                int64_t sector_num,
                                                QEMUIOVector *qiov,
                                                int nb_sectors,
                                                BlockDriverCompletionFunc *cb,
                                                void *opaque,
-                                               bool is_write)
+                                               bool is_write,
+                                               CoroutineEntry *entry)
 {
     Coroutine *co;
     BlockDriverAIOCBCoroutine *acb;
@@ -2858,7 +2880,7 @@ static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
     acb->req.qiov = qiov;
     acb->is_write = is_write;

-    co = qemu_coroutine_create(bdrv_co_rw);
+    co = qemu_coroutine_create(entry);
     qemu_coroutine_enter(co, acb);

     return &acb->common;
@@ -2869,7 +2891,7 @@ static BlockDriverAIOCB *bdrv_co_aio_readv_em(BlockDriverState *bs,
         BlockDriverCompletionFunc *cb, void *opaque)
 {
     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque,
-                                 false);
+                                 false, bdrv_co_rw);
 }

 static BlockDriverAIOCB *bdrv_co_aio_writev_em(BlockDriverState *bs,
@@ -2877,7 +2899,7 @@ static BlockDriverAIOCB *bdrv_co_aio_writev_em(BlockDriverState *bs,
         BlockDriverCompletionFunc *cb, void *opaque)
 {
     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque,
-                                 true);
+                                 true, bdrv_co_rw);
 }

 static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
--
1.7.6.3
* [Qemu-devel] [PATCH v2 4/5] block: mark blocks dirty on coroutine write completion
From: Stefan Hajnoczi @ 2011-10-13 12:08 UTC
To: qemu-devel; +Cc: Kevin Wolf, Stefan Hajnoczi
The aio write operation marks blocks dirty when the write operation
completes. The coroutine write operation marks blocks dirty before
issuing the write operation.
It seems safest to mark the block dirty when the operation completes so
that anything tracking dirty blocks will not act before the change has
been made to the image file.
Make the coroutine write operation mark blocks dirty on write completion.
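The change boils down to swapping the order of two steps in
bdrv_co_do_writev() (condensed from the diff below):

    /* Before: blocks marked dirty before the driver writes the data */
    set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);

    /* After: blocks marked dirty only once the write has completed */
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    set_dirty_bitmap(bs, sector_num, nb_sectors, 1);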
Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
---
block.c | 5 ++++-
1 files changed, 4 insertions(+), 1 deletions(-)
diff --git a/block.c b/block.c
index e94fa61..02e15ca 100644
--- a/block.c
+++ b/block.c
@@ -1311,6 +1311,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
 {
     BlockDriver *drv = bs->drv;
+    int ret;

     if (!bs->drv) {
         return -ENOMEDIUM;
@@ -1322,6 +1323,8 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         return -EIO;
     }

+    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
+
     if (bs->dirty_bitmap) {
         set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
     }
@@ -1330,7 +1333,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         bs->wr_highest_sector = sector_num + nb_sectors - 1;
     }

-    return drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
+    return ret;
 }

 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
--
1.7.6.3
* [Qemu-devel] [PATCH v2 5/5] block: switch bdrv_aio_writev() to coroutines
From: Stefan Hajnoczi @ 2011-10-13 12:08 UTC
To: qemu-devel; +Cc: Kevin Wolf, Stefan Hajnoczi
More sync, aio, and coroutine unification. Make bdrv_aio_writev() go
through coroutine request processing.
Remove the dirty block callback mechanism, which was needed only for aio
processing; dirty block tracking can be done more naturally in coroutine
context.
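For reference, the coroutine path already covers what block_complete_cb()
used to do: since the previous patch, bdrv_co_do_writev() marks blocks dirty
inline after the driver write completes:

    /* From bdrv_co_do_writev(), as of the previous patch: */
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);

    if (bs->dirty_bitmap) {
        set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
    }

so the aio-only callback wrapper can simply be deleted.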
Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
---
block.c | 66 +-------------------------------------------------------------
1 files changed, 2 insertions(+), 64 deletions(-)
diff --git a/block.c b/block.c
index 02e15ca..8c7d05e 100644
--- a/block.c
+++ b/block.c
@@ -2385,76 +2385,14 @@ BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                  cb, opaque, false, bdrv_co_do_rw);
 }

-typedef struct BlockCompleteData {
-    BlockDriverCompletionFunc *cb;
-    void *opaque;
-    BlockDriverState *bs;
-    int64_t sector_num;
-    int nb_sectors;
-} BlockCompleteData;
-
-static void block_complete_cb(void *opaque, int ret)
-{
-    BlockCompleteData *b = opaque;
-
-    if (b->bs->dirty_bitmap) {
-        set_dirty_bitmap(b->bs, b->sector_num, b->nb_sectors, 1);
-    }
-    b->cb(b->opaque, ret);
-    g_free(b);
-}
-
-static BlockCompleteData *blk_dirty_cb_alloc(BlockDriverState *bs,
-                                             int64_t sector_num,
-                                             int nb_sectors,
-                                             BlockDriverCompletionFunc *cb,
-                                             void *opaque)
-{
-    BlockCompleteData *blkdata = g_malloc0(sizeof(BlockCompleteData));
-
-    blkdata->bs = bs;
-    blkdata->cb = cb;
-    blkdata->opaque = opaque;
-    blkdata->sector_num = sector_num;
-    blkdata->nb_sectors = nb_sectors;
-
-    return blkdata;
-}
-
 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                   QEMUIOVector *qiov, int nb_sectors,
                                   BlockDriverCompletionFunc *cb, void *opaque)
 {
-    BlockDriver *drv = bs->drv;
-    BlockDriverAIOCB *ret;
-    BlockCompleteData *blk_cb_data;
-
     trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

-    if (!drv)
-        return NULL;
-    if (bs->read_only)
-        return NULL;
-    if (bdrv_check_request(bs, sector_num, nb_sectors))
-        return NULL;
-
-    if (bs->dirty_bitmap) {
-        blk_cb_data = blk_dirty_cb_alloc(bs, sector_num, nb_sectors, cb,
-                                         opaque);
-        cb = &block_complete_cb;
-        opaque = blk_cb_data;
-    }
-
-    ret = drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
-                               cb, opaque);
-
-    if (ret) {
-        if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
-            bs->wr_highest_sector = sector_num + nb_sectors - 1;
-        }
-    }
-
-    return ret;
+    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
+                                 cb, opaque, true, bdrv_co_do_rw);
 }
--
1.7.6.3
* Re: [Qemu-devel] [PATCH v2 0/5] block: do request processing in a coroutine
From: Kevin Wolf @ 2011-10-13 13:05 UTC
To: Stefan Hajnoczi; +Cc: qemu-devel
On 13.10.2011 14:08, Stefan Hajnoczi wrote:
> Note: this version applies against Kevin's block tree
>
> Block layer features like dirty block tracing, I/O throttling, and live block
> copy are forced to duplicate code due to the three different interfaces:
> synchronous, asynchronous, and coroutines.
>
> Since there are bdrv_read(), bdrv_aio_readv(), and bdrv_co_readv() interfaces
> for read (and similar for write), per-request processing needs to be duplicated
> for each of these execution contexts. For example, dirty block tracking code
> is duplicated across these three interfaces.
>
> This patch series unifies request processing so that there is only one code
> path. I see this as a prerequisite to the live block copy (image streaming)
> code I am working on, so I'm pushing it now.
>
> The short-term win from this series is that it becomes easy to add live block
> copy and other features. We now have a single code path where the per-request
> processing is done.
>
> The longer-term win will be dropping the BlockDriver .bdrv_read(),
> .bdrv_write(), .bdrv_aio_readv(), and .bdrv_aio_writev() interfaces. By doing
> that we can bring all BlockDrivers onto a common interface, namely
> .bdrv_co_readv() and .bdrv_co_writev(). It will also allow us to drop most of
> the sync and aio emulation code.
>
> A consequence of this patch series is that every I/O request goes through at
> least one coroutine. There is no longer a direct .bdrv_read(), .bdrv_write(),
> .bdrv_aio_readv(), or .bdrv_aio_writev() call - we're trying to phase out those
> interfaces. I have not noticed performance degradation in correctness tests
> but we need to confirm that there has not been a performance regression.
>
> v2:
> * Fixed bdrv_read()/bdrv_write() infinite loop [Kevin]
>
> Stefan Hajnoczi (5):
> block: directly invoke .bdrv_* from emulation functions
> block: switch bdrv_read()/bdrv_write() to coroutines
> block: switch bdrv_aio_readv() to coroutines
> block: mark blocks dirty on coroutine write completion
> block: switch bdrv_aio_writev() to coroutines
>
> block.c | 245 ++++++++++++++++++++++++++++----------------------------------
> 1 files changed, 111 insertions(+), 134 deletions(-)
Thanks, applied to the block branch.
I think there's some dead code now, but we can leave the clean-up for later.
Kevin