* [Qemu-devel] [PULL 01/17] block: Add bdrv_get_block_status_above
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Fam Zheng, Stefan Hajnoczi
From: Fam Zheng <famz@redhat.com>
Like bdrv_is_allocated_above, this function follows the backing chain until it
sees BDRV_BLOCK_ALLOCATED. The base image itself is not included.
Reimplement bdrv_get_block_status on top.
[s/bdrv_is_allocated/bdrv_get_block_status/ in commit message as
suggested by Paolo.
--Stefan]
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 1432790990-25383-2-git-send-email-famz@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block/io.c | 56 +++++++++++++++++++++++++++++++++++++++++----------
include/block/block.h | 4 ++++
2 files changed, 49 insertions(+), 11 deletions(-)
diff --git a/block/io.c b/block/io.c
index e394d92..41463a6 100644
--- a/block/io.c
+++ b/block/io.c
@@ -1560,28 +1560,54 @@ static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
return ret;
}
-/* Coroutine wrapper for bdrv_get_block_status() */
-static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
+static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
+ BlockDriverState *base,
+ int64_t sector_num,
+ int nb_sectors,
+ int *pnum)
+{
+ BlockDriverState *p;
+ int64_t ret;
+
+ assert(bs != base);
+ for (p = bs; p != base; p = p->backing_hd) {
+ ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum);
+ if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
+ break;
+ }
+ /* [sector_num, pnum] unallocated on this layer, which could be only
+ * the first part of [sector_num, nb_sectors]. */
+ nb_sectors = MIN(nb_sectors, *pnum);
+ }
+ return ret;
+}
+
+/* Coroutine wrapper for bdrv_get_block_status_above() */
+static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
BdrvCoGetBlockStatusData *data = opaque;
- BlockDriverState *bs = data->bs;
- data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
- data->pnum);
+ data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
+ data->sector_num,
+ data->nb_sectors,
+ data->pnum);
data->done = true;
}
/*
- * Synchronous wrapper around bdrv_co_get_block_status().
+ * Synchronous wrapper around bdrv_co_get_block_status_above().
*
- * See bdrv_co_get_block_status() for details.
+ * See bdrv_co_get_block_status_above() for details.
*/
-int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, int *pnum)
+int64_t bdrv_get_block_status_above(BlockDriverState *bs,
+ BlockDriverState *base,
+ int64_t sector_num,
+ int nb_sectors, int *pnum)
{
Coroutine *co;
BdrvCoGetBlockStatusData data = {
.bs = bs,
+ .base = base,
.sector_num = sector_num,
.nb_sectors = nb_sectors,
.pnum = pnum,
@@ -1590,11 +1616,11 @@ int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
if (qemu_in_coroutine()) {
/* Fast-path if already in coroutine context */
- bdrv_get_block_status_co_entry(&data);
+ bdrv_get_block_status_above_co_entry(&data);
} else {
AioContext *aio_context = bdrv_get_aio_context(bs);
- co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
+ co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
qemu_coroutine_enter(co, &data);
while (!data.done) {
aio_poll(aio_context, true);
@@ -1603,6 +1629,14 @@ int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
return data.ret;
}
+int64_t bdrv_get_block_status(BlockDriverState *bs,
+ int64_t sector_num,
+ int nb_sectors, int *pnum)
+{
+ return bdrv_get_block_status_above(bs, bs->backing_hd,
+ sector_num, nb_sectors, pnum);
+}
+
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, int *pnum)
{
diff --git a/include/block/block.h b/include/block/block.h
index f7680b6..4d9b555 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -361,6 +361,10 @@ bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs);
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, int *pnum);
+int64_t bdrv_get_block_status_above(BlockDriverState *bs,
+ BlockDriverState *base,
+ int64_t sector_num,
+ int nb_sectors, int *pnum);
int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
int *pnum);
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
--
2.4.2
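
As a quick illustration of the new API, a minimal caller sketch follows; only
bdrv_get_block_status_above() and the BDRV_BLOCK_* flags come from the patch
above, the helper itself is hypothetical:

/* Hypothetical caller (not part of the series): classify a sector range
 * against every layer above 'base' in the backing chain. */
static void check_range_status(BlockDriverState *bs, BlockDriverState *base,
                               int64_t sector_num, int nb_sectors)
{
    int pnum;
    int64_t ret = bdrv_get_block_status_above(bs, base, sector_num,
                                              nb_sectors, &pnum);

    if (ret < 0) {
        return;                              /* query failed */
    }
    /* the first 'pnum' sectors of the range share the returned status */
    if (ret & BDRV_BLOCK_ALLOCATED) {
        /* data comes from bs or an intermediate image above 'base' */
    } else if (ret & BDRV_BLOCK_ZERO) {
        /* the range is known to read as zeroes */
    }
}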
* [Qemu-devel] [PULL 02/17] qmp: Add optional bool "unmap" to drive-mirror
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Fam Zheng, Stefan Hajnoczi
From: Fam Zheng <famz@redhat.com>
If specified as "true", it allows discarding target sectors where the source is
not allocated.
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 1432790990-25383-3-git-send-email-famz@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block/mirror.c | 7 +++++--
blockdev.c | 5 +++++
hmp.c | 2 +-
include/block/block_int.h | 2 ++
qapi/block-core.json | 8 +++++++-
qmp-commands.hx | 3 +++
6 files changed, 23 insertions(+), 4 deletions(-)
diff --git a/block/mirror.c b/block/mirror.c
index 58f391a..85995b2 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -57,6 +57,7 @@ typedef struct MirrorBlockJob {
int in_flight;
int sectors_in_flight;
int ret;
+ bool unmap;
} MirrorBlockJob;
typedef struct MirrorOp {
@@ -651,6 +652,7 @@ static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
int64_t buf_size,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
+ bool unmap,
BlockCompletionFunc *cb,
void *opaque, Error **errp,
const BlockJobDriver *driver,
@@ -703,6 +705,7 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target,
int64_t speed, uint32_t granularity, int64_t buf_size,
MirrorSyncMode mode, BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
+ bool unmap,
BlockCompletionFunc *cb,
void *opaque, Error **errp)
{
@@ -717,7 +720,7 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target,
base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
mirror_start_job(bs, target, replaces,
speed, granularity, buf_size,
- on_source_error, on_target_error, cb, opaque, errp,
+ on_source_error, on_target_error, unmap, cb, opaque, errp,
&mirror_job_driver, is_none_mode, base);
}
@@ -765,7 +768,7 @@ void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
bdrv_ref(base);
mirror_start_job(bs, base, NULL, speed, 0, 0,
- on_error, on_error, cb, opaque, &local_err,
+ on_error, on_error, false, cb, opaque, &local_err,
&commit_active_job_driver, false, base);
if (local_err) {
error_propagate(errp, local_err);
diff --git a/blockdev.c b/blockdev.c
index de94a8b..6a45555 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -2631,6 +2631,7 @@ void qmp_drive_mirror(const char *device, const char *target,
bool has_buf_size, int64_t buf_size,
bool has_on_source_error, BlockdevOnError on_source_error,
bool has_on_target_error, BlockdevOnError on_target_error,
+ bool has_unmap, bool unmap,
Error **errp)
{
BlockBackend *blk;
@@ -2662,6 +2663,9 @@ void qmp_drive_mirror(const char *device, const char *target,
if (!has_buf_size) {
buf_size = DEFAULT_MIRROR_BUF_SIZE;
}
+ if (!has_unmap) {
+ unmap = true;
+ }
if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) {
error_set(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
@@ -2801,6 +2805,7 @@ void qmp_drive_mirror(const char *device, const char *target,
has_replaces ? replaces : NULL,
speed, granularity, buf_size, sync,
on_source_error, on_target_error,
+ unmap,
block_job_cb, bs, &local_err);
if (local_err != NULL) {
bdrv_unref(target_bs);
diff --git a/hmp.c b/hmp.c
index 514f22f..d5b9ebb 100644
--- a/hmp.c
+++ b/hmp.c
@@ -1057,7 +1057,7 @@ void hmp_drive_mirror(Monitor *mon, const QDict *qdict)
false, NULL, false, NULL,
full ? MIRROR_SYNC_MODE_FULL : MIRROR_SYNC_MODE_TOP,
true, mode, false, 0, false, 0, false, 0,
- false, 0, false, 0, &err);
+ false, 0, false, 0, false, true, &err);
hmp_handle_error(mon, &err);
}
diff --git a/include/block/block_int.h b/include/block/block_int.h
index f004378..4e0d700 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -590,6 +590,7 @@ void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
* @mode: Whether to collapse all images in the chain to the target.
* @on_source_error: The action to take upon error reading from the source.
* @on_target_error: The action to take upon error writing to the target.
+ * @unmap: Whether to unmap target where source sectors only contain zeroes.
* @cb: Completion function for the job.
* @opaque: Opaque pointer value passed to @cb.
* @errp: Error object.
@@ -604,6 +605,7 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target,
int64_t speed, uint32_t granularity, int64_t buf_size,
MirrorSyncMode mode, BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
+ bool unmap,
BlockCompletionFunc *cb,
void *opaque, Error **errp);
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 8411d4f..71ed838 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -973,6 +973,11 @@
# @on-target-error: #optional the action to take on an error on the target,
# default 'report' (no limitations, since this applies to
# a different block device than @device).
+# @unmap: #optional Whether to try to unmap target sectors where source has
+# only zero. If true, and target unallocated sectors will read as zero,
+# target image sectors will be unmapped; otherwise, zeroes will be
+# written. Both will result in identical contents.
+# Default is true. (Since 2.4)
#
# Returns: nothing on success
# If @device is not a valid block device, DeviceNotFound
@@ -985,7 +990,8 @@
'sync': 'MirrorSyncMode', '*mode': 'NewImageMode',
'*speed': 'int', '*granularity': 'uint32',
'*buf-size': 'int', '*on-source-error': 'BlockdevOnError',
- '*on-target-error': 'BlockdevOnError' } }
+ '*on-target-error': 'BlockdevOnError',
+ '*unmap': 'bool' } }
##
# @BlockDirtyBitmap
diff --git a/qmp-commands.hx b/qmp-commands.hx
index 867a21f..3b33496 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -1503,6 +1503,7 @@ EQMP
.args_type = "sync:s,device:B,target:s,speed:i?,mode:s?,format:s?,"
"node-name:s?,replaces:s?,"
"on-source-error:s?,on-target-error:s?,"
+ "unmap:b?,"
"granularity:i?,buf-size:i?",
.mhandler.cmd_new = qmp_marshal_input_drive_mirror,
},
@@ -1542,6 +1543,8 @@ Arguments:
(BlockdevOnError, default 'report')
- "on-target-error": the action to take on an error on the target
(BlockdevOnError, default 'report')
+- "unmap": whether the target sectors should be discarded where source has only
+ zeroes. (json-bool, optional, default true)
The default value of the granularity is the image cluster size clamped
between 4096 and 65536, if the image format defines one. If the format
--
2.4.2
* Re: [Qemu-devel] [PULL 02/17] qmp: Add optional bool "unmap" to drive-mirror
From: Fam Zheng @ 2015-06-08 5:41 UTC (permalink / raw)
To: Stefan Hajnoczi; +Cc: Kevin Wolf, Peter Maydell, qemu-devel
On Fri, 06/05 12:57, Stefan Hajnoczi wrote:
> From: Fam Zheng <famz@redhat.com>
>
> If specified as "true", it allows discarding target sectors where the source is
> not allocated.
>
> Signed-off-by: Fam Zheng <famz@redhat.com>
> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
> Message-id: 1432790990-25383-3-git-send-email-famz@redhat.com
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> block/mirror.c | 7 +++++--
> blockdev.c | 5 +++++
> hmp.c | 2 +-
> include/block/block_int.h | 2 ++
> qapi/block-core.json | 8 +++++++-
> qmp-commands.hx | 3 +++
> 6 files changed, 23 insertions(+), 4 deletions(-)
>
> diff --git a/block/mirror.c b/block/mirror.c
> index 58f391a..85995b2 100644
> --- a/block/mirror.c
> +++ b/block/mirror.c
> @@ -57,6 +57,7 @@ typedef struct MirrorBlockJob {
> int in_flight;
> int sectors_in_flight;
> int ret;
> + bool unmap;
> } MirrorBlockJob;
>
> typedef struct MirrorOp {
> @@ -651,6 +652,7 @@ static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
> int64_t buf_size,
> BlockdevOnError on_source_error,
> BlockdevOnError on_target_error,
> + bool unmap,
> BlockCompletionFunc *cb,
> void *opaque, Error **errp,
> const BlockJobDriver *driver,
I forgot to assign unmap to s->unmap. I'll respin the series.
Fam
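
(For readers following along: the missing piece Fam refers to would presumably
be a one-line assignment in mirror_start_job(), roughly as sketched below; the
actual respin is not part of this excerpt.)

    /* sketch of the fix Fam describes: remember the flag in the job so that
     * mirror_iteration() can later test s->unmap */
    s->unmap = unmap;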
> @@ -703,6 +705,7 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target,
> int64_t speed, uint32_t granularity, int64_t buf_size,
> MirrorSyncMode mode, BlockdevOnError on_source_error,
> BlockdevOnError on_target_error,
> + bool unmap,
> BlockCompletionFunc *cb,
> void *opaque, Error **errp)
> {
> @@ -717,7 +720,7 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target,
> base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
> mirror_start_job(bs, target, replaces,
> speed, granularity, buf_size,
> - on_source_error, on_target_error, cb, opaque, errp,
> + on_source_error, on_target_error, unmap, cb, opaque, errp,
> &mirror_job_driver, is_none_mode, base);
> }
>
> @@ -765,7 +768,7 @@ void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
>
> bdrv_ref(base);
> mirror_start_job(bs, base, NULL, speed, 0, 0,
> - on_error, on_error, cb, opaque, &local_err,
> + on_error, on_error, false, cb, opaque, &local_err,
> &commit_active_job_driver, false, base);
> if (local_err) {
> error_propagate(errp, local_err);
> diff --git a/blockdev.c b/blockdev.c
> index de94a8b..6a45555 100644
> --- a/blockdev.c
> +++ b/blockdev.c
> @@ -2631,6 +2631,7 @@ void qmp_drive_mirror(const char *device, const char *target,
> bool has_buf_size, int64_t buf_size,
> bool has_on_source_error, BlockdevOnError on_source_error,
> bool has_on_target_error, BlockdevOnError on_target_error,
> + bool has_unmap, bool unmap,
> Error **errp)
> {
> BlockBackend *blk;
> @@ -2662,6 +2663,9 @@ void qmp_drive_mirror(const char *device, const char *target,
> if (!has_buf_size) {
> buf_size = DEFAULT_MIRROR_BUF_SIZE;
> }
> + if (!has_unmap) {
> + unmap = true;
> + }
>
> if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) {
> error_set(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
> @@ -2801,6 +2805,7 @@ void qmp_drive_mirror(const char *device, const char *target,
> has_replaces ? replaces : NULL,
> speed, granularity, buf_size, sync,
> on_source_error, on_target_error,
> + unmap,
> block_job_cb, bs, &local_err);
> if (local_err != NULL) {
> bdrv_unref(target_bs);
> diff --git a/hmp.c b/hmp.c
> index 514f22f..d5b9ebb 100644
> --- a/hmp.c
> +++ b/hmp.c
> @@ -1057,7 +1057,7 @@ void hmp_drive_mirror(Monitor *mon, const QDict *qdict)
> false, NULL, false, NULL,
> full ? MIRROR_SYNC_MODE_FULL : MIRROR_SYNC_MODE_TOP,
> true, mode, false, 0, false, 0, false, 0,
> - false, 0, false, 0, &err);
> + false, 0, false, 0, false, true, &err);
> hmp_handle_error(mon, &err);
> }
>
> diff --git a/include/block/block_int.h b/include/block/block_int.h
> index f004378..4e0d700 100644
> --- a/include/block/block_int.h
> +++ b/include/block/block_int.h
> @@ -590,6 +590,7 @@ void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
> * @mode: Whether to collapse all images in the chain to the target.
> * @on_source_error: The action to take upon error reading from the source.
> * @on_target_error: The action to take upon error writing to the target.
> + * @unmap: Whether to unmap target where source sectors only contain zeroes.
> * @cb: Completion function for the job.
> * @opaque: Opaque pointer value passed to @cb.
> * @errp: Error object.
> @@ -604,6 +605,7 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target,
> int64_t speed, uint32_t granularity, int64_t buf_size,
> MirrorSyncMode mode, BlockdevOnError on_source_error,
> BlockdevOnError on_target_error,
> + bool unmap,
> BlockCompletionFunc *cb,
> void *opaque, Error **errp);
>
> diff --git a/qapi/block-core.json b/qapi/block-core.json
> index 8411d4f..71ed838 100644
> --- a/qapi/block-core.json
> +++ b/qapi/block-core.json
> @@ -973,6 +973,11 @@
> # @on-target-error: #optional the action to take on an error on the target,
> # default 'report' (no limitations, since this applies to
> # a different block device than @device).
> +# @unmap: #optional Whether to try to unmap target sectors where source has
> +# only zero. If true, and target unallocated sectors will read as zero,
> +# target image sectors will be unmapped; otherwise, zeroes will be
> +# written. Both will result in identical contents.
> +# Default is true. (Since 2.4)
> #
> # Returns: nothing on success
> # If @device is not a valid block device, DeviceNotFound
> @@ -985,7 +990,8 @@
> 'sync': 'MirrorSyncMode', '*mode': 'NewImageMode',
> '*speed': 'int', '*granularity': 'uint32',
> '*buf-size': 'int', '*on-source-error': 'BlockdevOnError',
> - '*on-target-error': 'BlockdevOnError' } }
> + '*on-target-error': 'BlockdevOnError',
> + '*unmap': 'bool' } }
>
> ##
> # @BlockDirtyBitmap
> diff --git a/qmp-commands.hx b/qmp-commands.hx
> index 867a21f..3b33496 100644
> --- a/qmp-commands.hx
> +++ b/qmp-commands.hx
> @@ -1503,6 +1503,7 @@ EQMP
> .args_type = "sync:s,device:B,target:s,speed:i?,mode:s?,format:s?,"
> "node-name:s?,replaces:s?,"
> "on-source-error:s?,on-target-error:s?,"
> + "unmap:b?,"
> "granularity:i?,buf-size:i?",
> .mhandler.cmd_new = qmp_marshal_input_drive_mirror,
> },
> @@ -1542,6 +1543,8 @@ Arguments:
> (BlockdevOnError, default 'report')
> - "on-target-error": the action to take on an error on the target
> (BlockdevOnError, default 'report')
> +- "unmap": whether the target sectors should be discarded where source has only
> + zeroes. (json-bool, optional, default true)
>
> The default value of the granularity is the image cluster size clamped
> between 4096 and 65536, if the image format defines one. If the format
> --
> 2.4.2
>
* [Qemu-devel] [PULL 03/17] mirror: Do zero write on target if sectors not allocated
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Fam Zheng, Stefan Hajnoczi
From: Fam Zheng <famz@redhat.com>
If the guest discards a source cluster, mirroring it with bdrv_aio_readv is
overkill. Some protocols zero the sectors upon discard, in which case it's best
to use bdrv_aio_write_zeroes; otherwise, bdrv_aio_discard is enough.
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 1432790990-25383-4-git-send-email-famz@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block/mirror.c | 20 ++++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/block/mirror.c b/block/mirror.c
index 85995b2..ba33254 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -164,6 +164,8 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
uint64_t delay_ns = 0;
MirrorOp *op;
+ int pnum;
+ int64_t ret;
s->sector_num = hbitmap_iter_next(&s->hbi);
if (s->sector_num < 0) {
@@ -290,8 +292,22 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
s->in_flight++;
s->sectors_in_flight += nb_sectors;
trace_mirror_one_iteration(s, sector_num, nb_sectors);
- bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
- mirror_read_complete, op);
+
+ ret = bdrv_get_block_status_above(source, NULL, sector_num,
+ nb_sectors, &pnum);
+ if (ret < 0 || pnum < nb_sectors ||
+ (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) {
+ bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
+ mirror_read_complete, op);
+ } else if (ret & BDRV_BLOCK_ZERO) {
+ bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors,
+ s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
+ mirror_write_complete, op);
+ } else {
+ assert(!(ret & BDRV_BLOCK_DATA));
+ bdrv_aio_discard(s->target, sector_num, op->nb_sectors,
+ mirror_write_complete, op);
+ }
return delay_ns;
}
--
2.4.2
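
To spell out the decision the new hunk makes, the same three-way classification
is restated below as a commented sketch; the block-layer calls and flags are
those from the patch, while the helper and its return values are purely
illustrative:

/* Commented restatement of the logic added to mirror_iteration() above:
 * given the status of [sector_num, sector_num + nb_sectors) on the source,
 * pick the cheapest way to replicate it on the target. */
static int mirror_method_for_range(BlockDriverState *source,
                                   int64_t sector_num, int nb_sectors)
{
    int pnum;
    int64_t ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                              nb_sectors, &pnum);

    if (ret < 0 || pnum < nb_sectors ||
        (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) {
        return 0;   /* unknown, mixed, or real data: copy with bdrv_aio_readv() */
    } else if (ret & BDRV_BLOCK_ZERO) {
        return 1;   /* reads as zero: bdrv_aio_write_zeroes() on the target,
                     * with BDRV_REQ_MAY_UNMAP if the "unmap" option is set */
    } else {
        return 2;   /* neither data nor zero: bdrv_aio_discard() is enough */
    }
}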
* [Qemu-devel] [PULL 04/17] block: Fix dirty bitmap in bdrv_co_discard
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Fam Zheng, Stefan Hajnoczi
From: Fam Zheng <famz@redhat.com>
Clearing the dirty bitmap globally on discard is not quite correct. The discard
may zero out sectors (depending on can_write_zeroes_with_unmap), so we should
replicate this change to the destination side to make sure the guest sees the
same data.
Calling bdrv_reset_dirty also troubles the mirror job, because the hbitmap
iterator does not expect bits to be unset after the current position.
So let's do it the opposite way, which fixes both problems: set the dirty bits
if we are about to discard the sectors.
Reported-by: wangxiaolong@ucloud.cn
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-id: 1432790990-25383-5-git-send-email-famz@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block/io.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/block/io.c b/block/io.c
index 41463a6..a71346c 100644
--- a/block/io.c
+++ b/block/io.c
@@ -2440,8 +2440,6 @@ int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
return -EPERM;
}
- bdrv_reset_dirty(bs, sector_num, nb_sectors);
-
/* Do nothing if disabled. */
if (!(bs->open_flags & BDRV_O_UNMAP)) {
return 0;
@@ -2451,6 +2449,8 @@ int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
return 0;
}
+ bdrv_set_dirty(bs, sector_num, nb_sectors);
+
max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
while (nb_sectors > 0) {
int ret;
--
2.4.2
* [Qemu-devel] [PULL 05/17] block: Remove bdrv_reset_dirty
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Fam Zheng, Stefan Hajnoczi
From: Fam Zheng <famz@redhat.com>
Using this function would always be wrong because a dirty bitmap must
have a specific owner that consumes the dirty bits and calls
bdrv_reset_dirty_bitmap().
Remove the unused function to avoid future misuse.
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Message-id: 1432790990-25383-6-git-send-email-famz@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block.c | 12 ------------
include/block/block_int.h | 2 --
2 files changed, 14 deletions(-)
diff --git a/block.c b/block.c
index 2b9ceae..9890707 100644
--- a/block.c
+++ b/block.c
@@ -3347,18 +3347,6 @@ void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
}
}
-void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
- int nr_sectors)
-{
- BdrvDirtyBitmap *bitmap;
- QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
- if (!bdrv_dirty_bitmap_enabled(bitmap)) {
- continue;
- }
- hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
- }
-}
-
/**
* Advance an HBitmapIter to an arbitrary offset.
*/
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 4e0d700..459fe1c 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -640,7 +640,5 @@ bool blk_dev_is_medium_locked(BlockBackend *blk);
void blk_dev_resize_cb(BlockBackend *blk);
void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);
-void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
- int nr_sectors);
#endif /* BLOCK_INT_H */
--
2.4.2
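
For contrast with the removed helper, a minimal sketch of the ownership pattern
the commit message has in mind; it assumes the sector-based
bdrv_reset_dirty_bitmap() signature of this period, and the job itself is
hypothetical:

/* Sketch only: the bitmap's owner (e.g. a sync job) clears exactly the range
 * it has finished processing, on its own bitmap, instead of anything like a
 * global bdrv_reset_dirty() on the whole BlockDriverState. */
static void sync_job_range_done(BdrvDirtyBitmap *bitmap,
                                int64_t cur_sector, int nr_sectors)
{
    bdrv_reset_dirty_bitmap(bitmap, cur_sector, nr_sectors);
}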
* [Qemu-devel] [PULL 06/17] qemu-iotests: Make block job methods common
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Fam Zheng, Stefan Hajnoczi
From: Fam Zheng <famz@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 1432790990-25383-7-git-send-email-famz@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
tests/qemu-iotests/041 | 66 ++++++++++---------------------------------
tests/qemu-iotests/iotests.py | 28 ++++++++++++++++++
2 files changed, 43 insertions(+), 51 deletions(-)
diff --git a/tests/qemu-iotests/041 b/tests/qemu-iotests/041
index 59a8f73..3d46ed7 100755
--- a/tests/qemu-iotests/041
+++ b/tests/qemu-iotests/041
@@ -34,38 +34,8 @@ quorum_img3 = os.path.join(iotests.test_dir, 'quorum3.img')
quorum_repair_img = os.path.join(iotests.test_dir, 'quorum_repair.img')
quorum_snapshot_file = os.path.join(iotests.test_dir, 'quorum_snapshot.img')
-class ImageMirroringTestCase(iotests.QMPTestCase):
- '''Abstract base class for image mirroring test cases'''
- def wait_ready(self, drive='drive0'):
- '''Wait until a block job BLOCK_JOB_READY event'''
- ready = False
- while not ready:
- for event in self.vm.get_qmp_events(wait=True):
- if event['event'] == 'BLOCK_JOB_READY':
- self.assert_qmp(event, 'data/type', 'mirror')
- self.assert_qmp(event, 'data/device', drive)
- ready = True
-
- def wait_ready_and_cancel(self, drive='drive0'):
- self.wait_ready(drive=drive)
- event = self.cancel_and_wait(drive=drive)
- self.assertEquals(event['event'], 'BLOCK_JOB_COMPLETED')
- self.assert_qmp(event, 'data/type', 'mirror')
- self.assert_qmp(event, 'data/offset', event['data']['len'])
-
- def complete_and_wait(self, drive='drive0', wait_ready=True):
- '''Complete a block job and wait for it to finish'''
- if wait_ready:
- self.wait_ready(drive=drive)
-
- result = self.vm.qmp('block-job-complete', device=drive)
- self.assert_qmp(result, 'return', {})
-
- event = self.wait_until_completed(drive=drive)
- self.assert_qmp(event, 'data/type', 'mirror')
-
-class TestSingleDrive(ImageMirroringTestCase):
+class TestSingleDrive(iotests.QMPTestCase):
image_len = 1 * 1024 * 1024 # MB
def setUp(self):
@@ -221,17 +191,9 @@ class TestSingleDriveUnalignedLength(TestSingleDrive):
test_small_buffer2 = None
test_large_cluster = None
-class TestMirrorNoBacking(ImageMirroringTestCase):
+class TestMirrorNoBacking(iotests.QMPTestCase):
image_len = 2 * 1024 * 1024 # MB
- def complete_and_wait(self, drive='drive0', wait_ready=True):
- iotests.create_image(target_backing_img, TestMirrorNoBacking.image_len)
- return ImageMirroringTestCase.complete_and_wait(self, drive, wait_ready)
-
- def compare_images(self, img1, img2):
- iotests.create_image(target_backing_img, TestMirrorNoBacking.image_len)
- return iotests.compare_images(img1, img2)
-
def setUp(self):
iotests.create_image(backing_img, TestMirrorNoBacking.image_len)
qemu_img('create', '-f', iotests.imgfmt, '-o', 'backing_file=%s' % backing_img, test_img)
@@ -242,7 +204,10 @@ class TestMirrorNoBacking(ImageMirroringTestCase):
self.vm.shutdown()
os.remove(test_img)
os.remove(backing_img)
- os.remove(target_backing_img)
+ try:
+ os.remove(target_backing_img)
+ except:
+ pass
os.remove(target_img)
def test_complete(self):
@@ -257,7 +222,7 @@ class TestMirrorNoBacking(ImageMirroringTestCase):
result = self.vm.qmp('query-block')
self.assert_qmp(result, 'return[0]/inserted/file', target_img)
self.vm.shutdown()
- self.assertTrue(self.compare_images(test_img, target_img),
+ self.assertTrue(iotests.compare_images(test_img, target_img),
'target image does not match source after mirroring')
def test_cancel(self):
@@ -272,7 +237,7 @@ class TestMirrorNoBacking(ImageMirroringTestCase):
result = self.vm.qmp('query-block')
self.assert_qmp(result, 'return[0]/inserted/file', test_img)
self.vm.shutdown()
- self.assertTrue(self.compare_images(test_img, target_img),
+ self.assertTrue(iotests.compare_images(test_img, target_img),
'target image does not match source after mirroring')
def test_large_cluster(self):
@@ -283,7 +248,6 @@ class TestMirrorNoBacking(ImageMirroringTestCase):
%(TestMirrorNoBacking.image_len), target_backing_img)
qemu_img('create', '-f', iotests.imgfmt, '-o', 'cluster_size=%d,backing_file=%s'
% (TestMirrorNoBacking.image_len, target_backing_img), target_img)
- os.remove(target_backing_img)
result = self.vm.qmp('drive-mirror', device='drive0', sync='full',
mode='existing', target=target_img)
@@ -293,10 +257,10 @@ class TestMirrorNoBacking(ImageMirroringTestCase):
result = self.vm.qmp('query-block')
self.assert_qmp(result, 'return[0]/inserted/file', target_img)
self.vm.shutdown()
- self.assertTrue(self.compare_images(test_img, target_img),
+ self.assertTrue(iotests.compare_images(test_img, target_img),
'target image does not match source after mirroring')
-class TestMirrorResized(ImageMirroringTestCase):
+class TestMirrorResized(iotests.QMPTestCase):
backing_len = 1 * 1024 * 1024 # MB
image_len = 2 * 1024 * 1024 # MB
@@ -344,7 +308,7 @@ class TestMirrorResized(ImageMirroringTestCase):
self.assertTrue(iotests.compare_images(test_img, target_img),
'target image does not match source after mirroring')
-class TestReadErrors(ImageMirroringTestCase):
+class TestReadErrors(iotests.QMPTestCase):
image_len = 2 * 1024 * 1024 # MB
# this should be a multiple of twice the default granularity
@@ -498,7 +462,7 @@ new_state = "1"
self.assert_no_active_block_jobs()
self.vm.shutdown()
-class TestWriteErrors(ImageMirroringTestCase):
+class TestWriteErrors(iotests.QMPTestCase):
image_len = 2 * 1024 * 1024 # MB
# this should be a multiple of twice the default granularity
@@ -624,7 +588,7 @@ new_state = "1"
self.assert_no_active_block_jobs()
self.vm.shutdown()
-class TestSetSpeed(ImageMirroringTestCase):
+class TestSetSpeed(iotests.QMPTestCase):
image_len = 80 * 1024 * 1024 # MB
def setUp(self):
@@ -690,7 +654,7 @@ class TestSetSpeed(ImageMirroringTestCase):
self.wait_ready_and_cancel()
-class TestUnbackedSource(ImageMirroringTestCase):
+class TestUnbackedSource(iotests.QMPTestCase):
image_len = 2 * 1024 * 1024 # MB
def setUp(self):
@@ -731,7 +695,7 @@ class TestUnbackedSource(ImageMirroringTestCase):
self.complete_and_wait()
self.assert_no_active_block_jobs()
-class TestRepairQuorum(ImageMirroringTestCase):
+class TestRepairQuorum(iotests.QMPTestCase):
""" This class test quorum file repair using drive-mirror.
It's mostly a fork of TestSingleDrive """
image_len = 1 * 1024 * 1024 # MB
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 04a294d..63de726 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -326,6 +326,34 @@ class QMPTestCase(unittest.TestCase):
self.assert_no_active_block_jobs()
return event
+ def wait_ready(self, drive='drive0'):
+ '''Wait until a block job BLOCK_JOB_READY event'''
+ ready = False
+ while not ready:
+ for event in self.vm.get_qmp_events(wait=True):
+ if event['event'] == 'BLOCK_JOB_READY':
+ self.assert_qmp(event, 'data/type', 'mirror')
+ self.assert_qmp(event, 'data/device', drive)
+ ready = True
+
+ def wait_ready_and_cancel(self, drive='drive0'):
+ self.wait_ready(drive=drive)
+ event = self.cancel_and_wait(drive=drive)
+ self.assertEquals(event['event'], 'BLOCK_JOB_COMPLETED')
+ self.assert_qmp(event, 'data/type', 'mirror')
+ self.assert_qmp(event, 'data/offset', event['data']['len'])
+
+ def complete_and_wait(self, drive='drive0', wait_ready=True):
+ '''Complete a block job and wait for it to finish'''
+ if wait_ready:
+ self.wait_ready(drive=drive)
+
+ result = self.vm.qmp('block-job-complete', device=drive)
+ self.assert_qmp(result, 'return', {})
+
+ event = self.wait_until_completed(drive=drive)
+ self.assert_qmp(event, 'data/type', 'mirror')
+
def notrun(reason):
'''Skip this test suite'''
# Each test in qemu-iotests has a number ("seq")
--
2.4.2
* [Qemu-devel] [PULL 07/17] qemu-iotests: Add test case for mirror with unmap
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Fam Zheng, Stefan Hajnoczi
From: Fam Zheng <famz@redhat.com>
This checks that a discard on the mirror source which effectively zeroes the
data is also reflected in the target's data.
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 1432790990-25383-8-git-send-email-famz@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
tests/qemu-iotests/132 | 59 ++++++++++++++++++++++++++++++++++++++++++++++
tests/qemu-iotests/132.out | 5 ++++
tests/qemu-iotests/group | 1 +
3 files changed, 65 insertions(+)
create mode 100644 tests/qemu-iotests/132
create mode 100644 tests/qemu-iotests/132.out
diff --git a/tests/qemu-iotests/132 b/tests/qemu-iotests/132
new file mode 100644
index 0000000..f53ef6e
--- /dev/null
+++ b/tests/qemu-iotests/132
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+#
+# Test mirror with unmap
+#
+# Copyright (C) 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import time
+import os
+import iotests
+from iotests import qemu_img, qemu_io
+
+test_img = os.path.join(iotests.test_dir, 'test.img')
+target_img = os.path.join(iotests.test_dir, 'target.img')
+
+class TestSingleDrive(iotests.QMPTestCase):
+ image_len = 2 * 1024 * 1024 # MB
+
+ def setUp(self):
+ # Write data to the image so we can compare later
+ qemu_img('create', '-f', iotests.imgfmt, test_img, str(TestSingleDrive.image_len))
+ qemu_io('-f', iotests.imgfmt, '-c', 'write -P0x5d 0 2M', test_img)
+
+ self.vm = iotests.VM().add_drive(test_img, 'discard=unmap')
+ self.vm.launch()
+
+ def tearDown(self):
+ self.vm.shutdown()
+ os.remove(test_img)
+ try:
+ os.remove(target_img)
+ except OSError:
+ pass
+
+ def test_mirror_discard(self):
+ result = self.vm.qmp('drive-mirror', device='drive0', sync='full',
+ target=target_img)
+ self.assert_qmp(result, 'return', {})
+ self.vm.hmp_qemu_io('drive0', 'discard 0 64k')
+ self.complete_and_wait('drive0')
+ self.vm.shutdown()
+ self.assertTrue(iotests.compare_images(test_img, target_img),
+ 'target image does not match source after mirroring')
+
+if __name__ == '__main__':
+ iotests.main(supported_fmts=['raw', 'qcow2'])
diff --git a/tests/qemu-iotests/132.out b/tests/qemu-iotests/132.out
new file mode 100644
index 0000000..ae1213e
--- /dev/null
+++ b/tests/qemu-iotests/132.out
@@ -0,0 +1,5 @@
+.
+----------------------------------------------------------------------
+Ran 1 tests
+
+OK
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index 0b817ca..757ac1b 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -129,4 +129,5 @@
129 rw auto quick
130 rw auto quick
131 rw auto quick
+132 rw auto quick
134 rw auto quick
--
2.4.2
* [Qemu-devel] [PULL 08/17] iotests: Use event_wait in wait_ready
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Fam Zheng, Stefan Hajnoczi
From: Fam Zheng <famz@redhat.com>
Only poll the specific type of event we are interested in, to avoid
stealing events that should be consumed by someone else.
Suggested-by: John Snow <jsnow@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 1432790990-25383-9-git-send-email-famz@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
tests/qemu-iotests/iotests.py | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 63de726..8615b10 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -328,13 +328,8 @@ class QMPTestCase(unittest.TestCase):
def wait_ready(self, drive='drive0'):
'''Wait until a block job BLOCK_JOB_READY event'''
- ready = False
- while not ready:
- for event in self.vm.get_qmp_events(wait=True):
- if event['event'] == 'BLOCK_JOB_READY':
- self.assert_qmp(event, 'data/type', 'mirror')
- self.assert_qmp(event, 'data/device', drive)
- ready = True
+ f = {'data': {'type': 'mirror', 'device': drive } }
+ event = self.vm.event_wait(name='BLOCK_JOB_READY', match=f)
def wait_ready_and_cancel(self, drive='drive0'):
self.wait_ready(drive=drive)
--
2.4.2
* [Qemu-devel] [PULL 09/17] Revert "iothread: release iothread around aio_poll"
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Stefan Hajnoczi
This reverts commit a0710f7995f914e3044e5899bd8ff6c43c62f916.
In qemu-devel email message <556DBF87.2020908@de.ibm.com>, Christian
Borntraeger writes:
Having many guests all with a kernel/ramdisk (via -kernel) and
several null block devices will result in hangs. All hanging
guests are in partition detection code waiting for an I/O to return
so very early maybe even the first I/O.
Reverting that commit "fixes" the hangs.
Reverting this commit for the 2.4 release. More time is needed to
investigate and correct this patch.
Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
async.c | 8 +++++++-
iothread.c | 11 +++++++++--
tests/test-aio.c | 19 ++++++++-----------
3 files changed, 24 insertions(+), 14 deletions(-)
diff --git a/async.c b/async.c
index 46d9e63..77d080d 100644
--- a/async.c
+++ b/async.c
@@ -280,6 +280,12 @@ static void aio_timerlist_notify(void *opaque)
aio_notify(opaque);
}
+static void aio_rfifolock_cb(void *opaque)
+{
+ /* Kick owner thread in case they are blocked in aio_poll() */
+ aio_notify(opaque);
+}
+
AioContext *aio_context_new(Error **errp)
{
int ret;
@@ -297,7 +303,7 @@ AioContext *aio_context_new(Error **errp)
event_notifier_test_and_clear);
ctx->thread_pool = NULL;
qemu_mutex_init(&ctx->bh_lock);
- rfifolock_init(&ctx->lock, NULL, NULL);
+ rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
return ctx;
diff --git a/iothread.c b/iothread.c
index 0416fc4..878a594 100644
--- a/iothread.c
+++ b/iothread.c
@@ -31,14 +31,21 @@ typedef ObjectClass IOThreadClass;
static void *iothread_run(void *opaque)
{
IOThread *iothread = opaque;
+ bool blocking;
qemu_mutex_lock(&iothread->init_done_lock);
iothread->thread_id = qemu_get_thread_id();
qemu_cond_signal(&iothread->init_done_cond);
qemu_mutex_unlock(&iothread->init_done_lock);
- while (!atomic_read(&iothread->stopping)) {
- aio_poll(iothread->ctx, true);
+ while (!iothread->stopping) {
+ aio_context_acquire(iothread->ctx);
+ blocking = true;
+ while (!iothread->stopping && aio_poll(iothread->ctx, blocking)) {
+ /* Progress was made, keep going */
+ blocking = false;
+ }
+ aio_context_release(iothread->ctx);
}
return NULL;
}
diff --git a/tests/test-aio.c b/tests/test-aio.c
index 4b0cb45..a7cb5c9 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -107,7 +107,6 @@ static void test_notify(void)
typedef struct {
QemuMutex start_lock;
- EventNotifier notifier;
bool thread_acquired;
} AcquireTestData;
@@ -119,8 +118,6 @@ static void *test_acquire_thread(void *opaque)
qemu_mutex_lock(&data->start_lock);
qemu_mutex_unlock(&data->start_lock);
- g_usleep(500000);
- event_notifier_set(&data->notifier);
aio_context_acquire(ctx);
aio_context_release(ctx);
@@ -129,19 +126,20 @@ static void *test_acquire_thread(void *opaque)
return NULL;
}
-static void dummy_notifier_read(EventNotifier *n)
+static void dummy_notifier_read(EventNotifier *unused)
{
- event_notifier_test_and_clear(n);
+ g_assert(false); /* should never be invoked */
}
static void test_acquire(void)
{
QemuThread thread;
+ EventNotifier notifier;
AcquireTestData data;
/* Dummy event notifier ensures aio_poll() will block */
- event_notifier_init(&data.notifier, false);
- aio_set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
+ event_notifier_init(&notifier, false);
+ aio_set_event_notifier(ctx, &notifier, dummy_notifier_read);
g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */
qemu_mutex_init(&data.start_lock);
@@ -155,13 +153,12 @@ static void test_acquire(void)
/* Block in aio_poll(), let other thread kick us and acquire context */
aio_context_acquire(ctx);
qemu_mutex_unlock(&data.start_lock); /* let the thread run */
- g_assert(aio_poll(ctx, true));
- g_assert(!data.thread_acquired);
+ g_assert(!aio_poll(ctx, true));
aio_context_release(ctx);
qemu_thread_join(&thread);
- aio_set_event_notifier(ctx, &data.notifier, NULL);
- event_notifier_cleanup(&data.notifier);
+ aio_set_event_notifier(ctx, &notifier, NULL);
+ event_notifier_cleanup(&notifier);
g_assert(data.thread_acquired);
}
--
2.4.2
* [Qemu-devel] [PULL 10/17] throttle: Extract timers from ThrottleState into a separate structure
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel
Cc: Kevin Wolf, Peter Maydell, Alberto Garcia, Stefan Hajnoczi,
Benoît Canet
From: Benoît Canet <benoit.canet@nodalink.com>
Group throttling will share a ThrottleState between multiple BlockDriverStates.
As a consequence the ThrottleState will be accessed from multiple AioContexts.
Timers are tied to their AioContext, so they must move out of the ThrottleState
structure.
This commit paves the way for each BDS sharing a common ThrottleState to have
its own timers.
Signed-off-by: Benoit Canet <benoit.canet@nodalink.com>
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 44288c978038588b3fd57d068a0ca1adb7f2537f.1432037840.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block.c | 11 ++++---
block/io.c | 24 ++++++++------
include/block/block_int.h | 1 +
include/qemu/throttle.h | 38 ++++++++++++++--------
tests/test-throttle.c | 82 ++++++++++++++++++++++++++---------------------
util/throttle.c | 73 ++++++++++++++++++++++++-----------------
6 files changed, 135 insertions(+), 94 deletions(-)
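
Before the per-file hunks, a compact sketch of the calling convention after
this split; the throttle_* signatures match the hunks below, while the wrapper
function and its parameters are assumed context, not code from the series:

/* Sketch of the new split API: state and timers are now set up separately,
 * so a shared ThrottleState can later be paired with per-device timers. */
static void throttle_usage_sketch(AioContext *aio_context,
                                  QEMUTimerCB *read_timer_cb,
                                  QEMUTimerCB *write_timer_cb,
                                  void *opaque,
                                  ThrottleConfig *cfg, bool is_write)
{
    ThrottleState ts;
    ThrottleTimers tt;

    throttle_init(&ts);
    throttle_timers_init(&tt, aio_context, QEMU_CLOCK_VIRTUAL,
                         read_timer_cb, write_timer_cb, opaque);
    throttle_config(&ts, &tt, cfg);       /* was throttle_config(&ts, cfg) */

    if (throttle_schedule_timer(&ts, &tt, is_write)) {
        /* the request must wait: a timer was armed on tt.timers[is_write] */
    }

    throttle_timers_destroy(&tt);         /* was throttle_destroy(&ts) */
}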
diff --git a/block.c b/block.c
index 9890707..7a02db9 100644
--- a/block.c
+++ b/block.c
@@ -1825,6 +1825,9 @@ static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
memcpy(&bs_dest->throttle_state,
&bs_src->throttle_state,
sizeof(ThrottleState));
+ memcpy(&bs_dest->throttle_timers,
+ &bs_src->throttle_timers,
+ sizeof(ThrottleTimers));
bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
@@ -1886,7 +1889,7 @@ void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
assert(bs_new->job == NULL);
assert(bs_new->io_limits_enabled == false);
- assert(!throttle_have_timer(&bs_new->throttle_state));
+ assert(!throttle_timers_are_initialized(&bs_new->throttle_timers));
tmp = *bs_new;
*bs_new = *bs_old;
@@ -1903,7 +1906,7 @@ void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
/* Check a few fields that should remain attached to the device */
assert(bs_new->job == NULL);
assert(bs_new->io_limits_enabled == false);
- assert(!throttle_have_timer(&bs_new->throttle_state));
+ assert(!throttle_timers_are_initialized(&bs_new->throttle_timers));
/* insert the nodes back into the graph node list if needed */
if (bs_new->node_name[0] != '\0') {
@@ -3679,7 +3682,7 @@ void bdrv_detach_aio_context(BlockDriverState *bs)
}
if (bs->io_limits_enabled) {
- throttle_detach_aio_context(&bs->throttle_state);
+ throttle_timers_detach_aio_context(&bs->throttle_timers);
}
if (bs->drv->bdrv_detach_aio_context) {
bs->drv->bdrv_detach_aio_context(bs);
@@ -3715,7 +3718,7 @@ void bdrv_attach_aio_context(BlockDriverState *bs,
bs->drv->bdrv_attach_aio_context(bs, new_context);
}
if (bs->io_limits_enabled) {
- throttle_attach_aio_context(&bs->throttle_state, new_context);
+ throttle_timers_attach_aio_context(&bs->throttle_timers, new_context);
}
QLIST_FOREACH(ban, &bs->aio_notifiers, list) {
diff --git a/block/io.c b/block/io.c
index a71346c..ce5b4ec 100644
--- a/block/io.c
+++ b/block/io.c
@@ -65,7 +65,7 @@ void bdrv_set_io_limits(BlockDriverState *bs,
{
int i;
- throttle_config(&bs->throttle_state, cfg);
+ throttle_config(&bs->throttle_state, &bs->throttle_timers, cfg);
for (i = 0; i < 2; i++) {
qemu_co_enter_next(&bs->throttled_reqs[i]);
@@ -98,7 +98,7 @@ void bdrv_io_limits_disable(BlockDriverState *bs)
bdrv_start_throttled_reqs(bs);
- throttle_destroy(&bs->throttle_state);
+ throttle_timers_destroy(&bs->throttle_timers);
}
static void bdrv_throttle_read_timer_cb(void *opaque)
@@ -123,12 +123,13 @@ void bdrv_io_limits_enable(BlockDriverState *bs)
clock_type = QEMU_CLOCK_VIRTUAL;
}
assert(!bs->io_limits_enabled);
- throttle_init(&bs->throttle_state,
- bdrv_get_aio_context(bs),
- clock_type,
- bdrv_throttle_read_timer_cb,
- bdrv_throttle_write_timer_cb,
- bs);
+ throttle_init(&bs->throttle_state);
+ throttle_timers_init(&bs->throttle_timers,
+ bdrv_get_aio_context(bs),
+ clock_type,
+ bdrv_throttle_read_timer_cb,
+ bdrv_throttle_write_timer_cb,
+ bs);
bs->io_limits_enabled = true;
}
@@ -142,7 +143,9 @@ static void bdrv_io_limits_intercept(BlockDriverState *bs,
bool is_write)
{
/* does this io must wait */
- bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);
+ bool must_wait = throttle_schedule_timer(&bs->throttle_state,
+ &bs->throttle_timers,
+ is_write);
/* if must wait or any request of this type throttled queue the IO */
if (must_wait ||
@@ -155,7 +158,8 @@ static void bdrv_io_limits_intercept(BlockDriverState *bs,
/* if the next request must wait -> do nothing */
- if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
+ if (throttle_schedule_timer(&bs->throttle_state, &bs->throttle_timers,
+ is_write)) {
return;
}
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 459fe1c..d022164 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -380,6 +380,7 @@ struct BlockDriverState {
/* I/O throttling */
ThrottleState throttle_state;
+ ThrottleTimers throttle_timers;
CoQueue throttled_reqs[2];
bool io_limits_enabled;
diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
index b890613..2c560db 100644
--- a/include/qemu/throttle.h
+++ b/include/qemu/throttle.h
@@ -65,14 +65,17 @@ typedef struct ThrottleConfig {
typedef struct ThrottleState {
ThrottleConfig cfg; /* configuration */
int64_t previous_leak; /* timestamp of the last leak done */
- QEMUTimer * timers[2]; /* timers used to do the throttling */
+} ThrottleState;
+
+typedef struct ThrottleTimers {
+ QEMUTimer *timers[2]; /* timers used to do the throttling */
QEMUClockType clock_type; /* the clock used */
/* Callbacks */
QEMUTimerCB *read_timer_cb;
QEMUTimerCB *write_timer_cb;
void *timer_opaque;
-} ThrottleState;
+} ThrottleTimers;
/* operations on single leaky buckets */
void throttle_leak_bucket(LeakyBucket *bkt, int64_t delta);
@@ -86,20 +89,23 @@ bool throttle_compute_timer(ThrottleState *ts,
int64_t *next_timestamp);
/* init/destroy cycle */
-void throttle_init(ThrottleState *ts,
- AioContext *aio_context,
- QEMUClockType clock_type,
- void (read_timer)(void *),
- void (write_timer)(void *),
- void *timer_opaque);
+void throttle_init(ThrottleState *ts);
-void throttle_destroy(ThrottleState *ts);
+void throttle_timers_init(ThrottleTimers *tt,
+ AioContext *aio_context,
+ QEMUClockType clock_type,
+ QEMUTimerCB *read_timer_cb,
+ QEMUTimerCB *write_timer_cb,
+ void *timer_opaque);
-void throttle_detach_aio_context(ThrottleState *ts);
+void throttle_timers_destroy(ThrottleTimers *tt);
-void throttle_attach_aio_context(ThrottleState *ts, AioContext *new_context);
+void throttle_timers_detach_aio_context(ThrottleTimers *tt);
-bool throttle_have_timer(ThrottleState *ts);
+void throttle_timers_attach_aio_context(ThrottleTimers *tt,
+ AioContext *new_context);
+
+bool throttle_timers_are_initialized(ThrottleTimers *tt);
/* configuration */
bool throttle_enabled(ThrottleConfig *cfg);
@@ -108,12 +114,16 @@ bool throttle_conflicting(ThrottleConfig *cfg);
bool throttle_is_valid(ThrottleConfig *cfg);
-void throttle_config(ThrottleState *ts, ThrottleConfig *cfg);
+void throttle_config(ThrottleState *ts,
+ ThrottleTimers *tt,
+ ThrottleConfig *cfg);
void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg);
/* usage */
-bool throttle_schedule_timer(ThrottleState *ts, bool is_write);
+bool throttle_schedule_timer(ThrottleState *ts,
+ ThrottleTimers *tt,
+ bool is_write);
void throttle_account(ThrottleState *ts, bool is_write, uint64_t size);
diff --git a/tests/test-throttle.c b/tests/test-throttle.c
index d8ba415..458f577 100644
--- a/tests/test-throttle.c
+++ b/tests/test-throttle.c
@@ -20,6 +20,7 @@ static AioContext *ctx;
static LeakyBucket bkt;
static ThrottleConfig cfg;
static ThrottleState ts;
+static ThrottleTimers tt;
/* useful function */
static bool double_cmp(double x, double y)
@@ -103,17 +104,19 @@ static void test_init(void)
{
int i;
- /* fill the structure with crap */
+ /* fill the structures with crap */
memset(&ts, 1, sizeof(ts));
+ memset(&tt, 1, sizeof(tt));
- /* init the structure */
- throttle_init(&ts, ctx, QEMU_CLOCK_VIRTUAL,
- read_timer_cb, write_timer_cb, &ts);
+ /* init structures */
+ throttle_init(&ts);
+ throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
+ read_timer_cb, write_timer_cb, &ts);
/* check initialized fields */
- g_assert(ts.clock_type == QEMU_CLOCK_VIRTUAL);
- g_assert(ts.timers[0]);
- g_assert(ts.timers[1]);
+ g_assert(tt.clock_type == QEMU_CLOCK_VIRTUAL);
+ g_assert(tt.timers[0]);
+ g_assert(tt.timers[1]);
/* check other fields where cleared */
g_assert(!ts.previous_leak);
@@ -124,17 +127,18 @@ static void test_init(void)
g_assert(!ts.cfg.buckets[i].level);
}
- throttle_destroy(&ts);
+ throttle_timers_destroy(&tt);
}
static void test_destroy(void)
{
int i;
- throttle_init(&ts, ctx, QEMU_CLOCK_VIRTUAL,
- read_timer_cb, write_timer_cb, &ts);
- throttle_destroy(&ts);
+ throttle_init(&ts);
+ throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
+ read_timer_cb, write_timer_cb, &ts);
+ throttle_timers_destroy(&tt);
for (i = 0; i < 2; i++) {
- g_assert(!ts.timers[i]);
+ g_assert(!tt.timers[i]);
}
}
@@ -170,11 +174,12 @@ static void test_config_functions(void)
orig_cfg.op_size = 1;
- throttle_init(&ts, ctx, QEMU_CLOCK_VIRTUAL,
- read_timer_cb, write_timer_cb, &ts);
+ throttle_init(&ts);
+ throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
+ read_timer_cb, write_timer_cb, &ts);
/* structure reset by throttle_init previous_leak should be null */
g_assert(!ts.previous_leak);
- throttle_config(&ts, &orig_cfg);
+ throttle_config(&ts, &tt, &orig_cfg);
/* has previous leak been initialized by throttle_config ? */
g_assert(ts.previous_leak);
@@ -182,7 +187,7 @@ static void test_config_functions(void)
/* get back the fixed configuration */
throttle_get_config(&ts, &final_cfg);
- throttle_destroy(&ts);
+ throttle_timers_destroy(&tt);
g_assert(final_cfg.buckets[THROTTLE_BPS_TOTAL].avg == 153);
g_assert(final_cfg.buckets[THROTTLE_BPS_READ].avg == 56);
@@ -323,43 +328,47 @@ static void test_is_valid(void)
static void test_have_timer(void)
{
- /* zero the structure */
+ /* zero structures */
memset(&ts, 0, sizeof(ts));
+ memset(&tt, 0, sizeof(tt));
/* no timer set should return false */
- g_assert(!throttle_have_timer(&ts));
+ g_assert(!throttle_timers_are_initialized(&tt));
- /* init the structure */
- throttle_init(&ts, ctx, QEMU_CLOCK_VIRTUAL,
- read_timer_cb, write_timer_cb, &ts);
+ /* init structures */
+ throttle_init(&ts);
+ throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
+ read_timer_cb, write_timer_cb, &ts);
/* timer set by init should return true */
- g_assert(throttle_have_timer(&ts));
+ g_assert(throttle_timers_are_initialized(&tt));
- throttle_destroy(&ts);
+ throttle_timers_destroy(&tt);
}
static void test_detach_attach(void)
{
- /* zero the structure */
+ /* zero structures */
memset(&ts, 0, sizeof(ts));
+ memset(&tt, 0, sizeof(tt));
/* init the structure */
- throttle_init(&ts, ctx, QEMU_CLOCK_VIRTUAL,
- read_timer_cb, write_timer_cb, &ts);
+ throttle_init(&ts);
+ throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
+ read_timer_cb, write_timer_cb, &ts);
/* timer set by init should return true */
- g_assert(throttle_have_timer(&ts));
+ g_assert(throttle_timers_are_initialized(&tt));
/* timer should no longer exist after detaching */
- throttle_detach_aio_context(&ts);
- g_assert(!throttle_have_timer(&ts));
+ throttle_timers_detach_aio_context(&tt);
+ g_assert(!throttle_timers_are_initialized(&tt));
/* timer should exist again after attaching */
- throttle_attach_aio_context(&ts, ctx);
- g_assert(throttle_have_timer(&ts));
+ throttle_timers_attach_aio_context(&tt, ctx);
+ g_assert(throttle_timers_are_initialized(&tt));
- throttle_destroy(&ts);
+ throttle_timers_destroy(&tt);
}
static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */
@@ -387,9 +396,10 @@ static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */
cfg.op_size = op_size;
- throttle_init(&ts, ctx, QEMU_CLOCK_VIRTUAL,
- read_timer_cb, write_timer_cb, &ts);
- throttle_config(&ts, &cfg);
+ throttle_init(&ts);
+ throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
+ read_timer_cb, write_timer_cb, &ts);
+ throttle_config(&ts, &tt, &cfg);
/* account a read */
throttle_account(&ts, false, size);
@@ -414,7 +424,7 @@ static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */
return false;
}
- throttle_destroy(&ts);
+ throttle_timers_destroy(&tt);
return true;
}
diff --git a/util/throttle.c b/util/throttle.c
index f976ac7..d76a48e 100644
--- a/util/throttle.c
+++ b/util/throttle.c
@@ -159,29 +159,36 @@ bool throttle_compute_timer(ThrottleState *ts,
}
/* Add timers to event loop */
-void throttle_attach_aio_context(ThrottleState *ts, AioContext *new_context)
+void throttle_timers_attach_aio_context(ThrottleTimers *tt,
+ AioContext *new_context)
{
- ts->timers[0] = aio_timer_new(new_context, ts->clock_type, SCALE_NS,
- ts->read_timer_cb, ts->timer_opaque);
- ts->timers[1] = aio_timer_new(new_context, ts->clock_type, SCALE_NS,
- ts->write_timer_cb, ts->timer_opaque);
+ tt->timers[0] = aio_timer_new(new_context, tt->clock_type, SCALE_NS,
+ tt->read_timer_cb, tt->timer_opaque);
+ tt->timers[1] = aio_timer_new(new_context, tt->clock_type, SCALE_NS,
+ tt->write_timer_cb, tt->timer_opaque);
}
/* To be called first on the ThrottleState */
-void throttle_init(ThrottleState *ts,
- AioContext *aio_context,
- QEMUClockType clock_type,
- QEMUTimerCB *read_timer_cb,
- QEMUTimerCB *write_timer_cb,
- void *timer_opaque)
+void throttle_init(ThrottleState *ts)
{
memset(ts, 0, sizeof(ThrottleState));
+}
- ts->clock_type = clock_type;
- ts->read_timer_cb = read_timer_cb;
- ts->write_timer_cb = write_timer_cb;
- ts->timer_opaque = timer_opaque;
- throttle_attach_aio_context(ts, aio_context);
+/* To be called first on the ThrottleTimers */
+void throttle_timers_init(ThrottleTimers *tt,
+ AioContext *aio_context,
+ QEMUClockType clock_type,
+ QEMUTimerCB *read_timer_cb,
+ QEMUTimerCB *write_timer_cb,
+ void *timer_opaque)
+{
+ memset(tt, 0, sizeof(ThrottleTimers));
+
+ tt->clock_type = clock_type;
+ tt->read_timer_cb = read_timer_cb;
+ tt->write_timer_cb = write_timer_cb;
+ tt->timer_opaque = timer_opaque;
+ throttle_timers_attach_aio_context(tt, aio_context);
}
/* destroy a timer */
@@ -195,25 +202,25 @@ static void throttle_timer_destroy(QEMUTimer **timer)
}
/* Remove timers from event loop */
-void throttle_detach_aio_context(ThrottleState *ts)
+void throttle_timers_detach_aio_context(ThrottleTimers *tt)
{
int i;
for (i = 0; i < 2; i++) {
- throttle_timer_destroy(&ts->timers[i]);
+ throttle_timer_destroy(&tt->timers[i]);
}
}
-/* To be called last on the ThrottleState */
-void throttle_destroy(ThrottleState *ts)
+/* To be called last on the ThrottleTimers */
+void throttle_timers_destroy(ThrottleTimers *tt)
{
- throttle_detach_aio_context(ts);
+ throttle_timers_detach_aio_context(tt);
}
/* is any throttling timer configured */
-bool throttle_have_timer(ThrottleState *ts)
+bool throttle_timers_are_initialized(ThrottleTimers *tt)
{
- if (ts->timers[0]) {
+ if (tt->timers[0]) {
return true;
}
@@ -324,9 +331,12 @@ static void throttle_cancel_timer(QEMUTimer *timer)
/* Used to configure the throttle
*
* @ts: the throttle state we are working on
+ * @tt: the throttle timers we use in this aio context
* @cfg: the config to set
*/
-void throttle_config(ThrottleState *ts, ThrottleConfig *cfg)
+void throttle_config(ThrottleState *ts,
+ ThrottleTimers *tt,
+ ThrottleConfig *cfg)
{
int i;
@@ -336,10 +346,10 @@ void throttle_config(ThrottleState *ts, ThrottleConfig *cfg)
throttle_fix_bucket(&ts->cfg.buckets[i]);
}
- ts->previous_leak = qemu_clock_get_ns(ts->clock_type);
+ ts->previous_leak = qemu_clock_get_ns(tt->clock_type);
for (i = 0; i < 2; i++) {
- throttle_cancel_timer(ts->timers[i]);
+ throttle_cancel_timer(tt->timers[i]);
}
}
@@ -358,12 +368,15 @@ void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg)
*
* NOTE: this function is not unit tested due to it's usage of timer_mod
*
+ * @tt: the timers structure
* @is_write: the type of operation (read/write)
* @ret: true if the timer has been scheduled else false
*/
-bool throttle_schedule_timer(ThrottleState *ts, bool is_write)
+bool throttle_schedule_timer(ThrottleState *ts,
+ ThrottleTimers *tt,
+ bool is_write)
{
- int64_t now = qemu_clock_get_ns(ts->clock_type);
+ int64_t now = qemu_clock_get_ns(tt->clock_type);
int64_t next_timestamp;
bool must_wait;
@@ -378,12 +391,12 @@ bool throttle_schedule_timer(ThrottleState *ts, bool is_write)
}
/* request throttled and timer pending -> do nothing */
- if (timer_pending(ts->timers[is_write])) {
+ if (timer_pending(tt->timers[is_write])) {
return true;
}
/* request throttled and timer not pending -> arm timer */
- timer_mod(ts->timers[is_write], next_timestamp);
+ timer_mod(tt->timers[is_write], next_timestamp);
return true;
}
--
2.4.2
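For reference, a minimal sketch of how a caller is expected to drive the split
API above, modelled on the updated tests/test-throttle.c. The function names,
the callbacks, the NULL opaque and the 1 MB/s figure are illustrative
assumptions; only the throttle_* signatures come from this patch, and the
snippet only compiles inside a QEMU source tree:

    #include "qemu/throttle.h"
    #include <string.h>

    static void example_read_cb(void *opaque)  { /* re-enter a queued read */ }
    static void example_write_cb(void *opaque) { /* re-enter a queued write */ }

    static void example_setup(AioContext *ctx)
    {
        ThrottleState ts;    /* pure accounting state, no timers any more */
        ThrottleTimers tt;   /* per-AioContext timers, split out by this patch */
        ThrottleConfig cfg;

        throttle_init(&ts);
        throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
                             example_read_cb, example_write_cb, NULL);

        memset(&cfg, 0, sizeof(cfg));
        cfg.buckets[THROTTLE_BPS_TOTAL].avg = 1024 * 1024;  /* arbitrary 1 MB/s */
        throttle_config(&ts, &tt, &cfg);    /* both halves are needed now */

        /* throttle_schedule_timer() returns true when the request must wait */
        if (!throttle_schedule_timer(&ts, &tt, false)) {
            throttle_account(&ts, false, 4096);
        }

        throttle_timers_destroy(&tt);       /* timers get their own teardown */
    }

The point of the split is that a single ThrottleState can later be shared
while each user keeps its own ThrottleTimers, which is what the throttle
group patches that follow build on.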
^ permalink raw reply related [flat|nested] 20+ messages in thread
* [Qemu-devel] [PULL 11/17] throttle: Add throttle group infrastructure
2015-06-05 11:57 [Qemu-devel] [PULL 00/17] Block patches Stefan Hajnoczi
` (9 preceding siblings ...)
2015-06-05 11:57 ` [Qemu-devel] [PULL 10/17] throttle: Extract timers from ThrottleState into a separate structure Stefan Hajnoczi
@ 2015-06-05 11:57 ` Stefan Hajnoczi
2015-06-05 11:57 ` [Qemu-devel] [PULL 12/17] throttle: Add throttle group infrastructure tests Stefan Hajnoczi
` (6 subsequent siblings)
17 siblings, 0 replies; 20+ messages in thread
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Alberto Garcia, Stefan Hajnoczi
From: Alberto Garcia <berto@igalia.com>
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 8b3b38890977b0a0363c6b82c4ee5545fcf0ba83.1432037840.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block/Makefile.objs | 1 +
block/throttle-groups.c | 261 ++++++++++++++++++++++++++++++++++++++++
include/block/block_int.h | 1 +
include/block/throttle-groups.h | 39 ++++++
4 files changed, 302 insertions(+)
create mode 100644 block/throttle-groups.c
create mode 100644 include/block/throttle-groups.h
diff --git a/block/Makefile.objs b/block/Makefile.objs
index 0d8c2a4..c34fd7c 100644
--- a/block/Makefile.objs
+++ b/block/Makefile.objs
@@ -10,6 +10,7 @@ block-obj-$(CONFIG_WIN32) += raw-win32.o win32-aio.o
block-obj-$(CONFIG_POSIX) += raw-posix.o
block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
block-obj-y += null.o mirror.o io.o
+block-obj-y += throttle-groups.o
block-obj-y += nbd.o nbd-client.o sheepdog.o
block-obj-$(CONFIG_LIBISCSI) += iscsi.o
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
new file mode 100644
index 0000000..352077f
--- /dev/null
+++ b/block/throttle-groups.c
@@ -0,0 +1,261 @@
+/*
+ * QEMU block throttling group infrastructure
+ *
+ * Copyright (C) Nodalink, EURL. 2014
+ * Copyright (C) Igalia, S.L. 2015
+ *
+ * Authors:
+ * Benoît Canet <benoit.canet@nodalink.com>
+ * Alberto Garcia <berto@igalia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "block/throttle-groups.h"
+
+/* The ThrottleGroup structure (with its ThrottleState) is shared
+ * among different BlockDriverState and it's independent from
+ * AioContext, so in order to use it from different threads it needs
+ * its own locking.
+ *
+ * This locking is however handled internally in this file, so it's
+ * transparent to outside users.
+ *
+ * The whole ThrottleGroup structure is private and invisible to
+ * outside users, that only use it through its ThrottleState.
+ *
+ * In addition to the ThrottleGroup structure, BlockDriverState has
+ * fields that need to be accessed by other members of the group and
+ * therefore also need to be protected by this lock. Once a BDS is
+ * registered in a group those fields can be accessed by other threads
+ * any time.
+ *
+ * Again, all this is handled internally and is mostly transparent to
+ * the outside. The 'throttle_timers' field however has an additional
+ * constraint because it may be temporarily invalid (see for example
+ * bdrv_set_aio_context()). Therefore in this file a thread will
+ * access some other BDS's timers only after verifying that that BDS
+ * has throttled requests in the queue.
+ */
+typedef struct ThrottleGroup {
+ char *name; /* This is constant during the lifetime of the group */
+
+ QemuMutex lock; /* This lock protects the following four fields */
+ ThrottleState ts;
+ QLIST_HEAD(, BlockDriverState) head;
+ BlockDriverState *tokens[2];
+ bool any_timer_armed[2];
+
+ /* These two are protected by the global throttle_groups_lock */
+ unsigned refcount;
+ QTAILQ_ENTRY(ThrottleGroup) list;
+} ThrottleGroup;
+
+static QemuMutex throttle_groups_lock;
+static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
+ QTAILQ_HEAD_INITIALIZER(throttle_groups);
+
+/* Increments the reference count of a ThrottleGroup given its name.
+ *
+ * If no ThrottleGroup is found with the given name a new one is
+ * created.
+ *
+ * @name: the name of the ThrottleGroup
+ * @ret: the ThrottleGroup
+ */
+static ThrottleGroup *throttle_group_incref(const char *name)
+{
+ ThrottleGroup *tg = NULL;
+ ThrottleGroup *iter;
+
+ qemu_mutex_lock(&throttle_groups_lock);
+
+ /* Look for an existing group with that name */
+ QTAILQ_FOREACH(iter, &throttle_groups, list) {
+ if (!strcmp(name, iter->name)) {
+ tg = iter;
+ break;
+ }
+ }
+
+ /* Create a new one if not found */
+ if (!tg) {
+ tg = g_new0(ThrottleGroup, 1);
+ tg->name = g_strdup(name);
+ qemu_mutex_init(&tg->lock);
+ throttle_init(&tg->ts);
+ QLIST_INIT(&tg->head);
+
+ QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
+ }
+
+ tg->refcount++;
+
+ qemu_mutex_unlock(&throttle_groups_lock);
+
+ return tg;
+}
+
+/* Decrease the reference count of a ThrottleGroup.
+ *
+ * When the reference count reaches zero the ThrottleGroup is
+ * destroyed.
+ *
+ * @tg: The ThrottleGroup to unref
+ */
+static void throttle_group_unref(ThrottleGroup *tg)
+{
+ qemu_mutex_lock(&throttle_groups_lock);
+ if (--tg->refcount == 0) {
+ QTAILQ_REMOVE(&throttle_groups, tg, list);
+ qemu_mutex_destroy(&tg->lock);
+ g_free(tg->name);
+ g_free(tg);
+ }
+ qemu_mutex_unlock(&throttle_groups_lock);
+}
+
+/* Get the name from a BlockDriverState's ThrottleGroup. The name (and
+ * the pointer) is guaranteed to remain constant during the lifetime
+ * of the group.
+ *
+ * @bs: a BlockDriverState that is member of a throttling group
+ * @ret: the name of the group.
+ */
+const char *throttle_group_get_name(BlockDriverState *bs)
+{
+ ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+ return tg->name;
+}
+
+/* Return the next BlockDriverState in the round-robin sequence,
+ * simulating a circular list.
+ *
+ * This assumes that tg->lock is held.
+ *
+ * @bs: the current BlockDriverState
+ * @ret: the next BlockDriverState in the sequence
+ */
+static BlockDriverState *throttle_group_next_bs(BlockDriverState *bs)
+{
+ ThrottleState *ts = bs->throttle_state;
+ ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
+ BlockDriverState *next = QLIST_NEXT(bs, round_robin);
+
+ if (!next) {
+ return QLIST_FIRST(&tg->head);
+ }
+
+ return next;
+}
+
+/* Update the throttle configuration for a particular group. Similar
+ * to throttle_config(), but guarantees atomicity within the
+ * throttling group.
+ *
+ * @bs: a BlockDriverState that is member of the group
+ * @cfg: the configuration to set
+ */
+void throttle_group_config(BlockDriverState *bs, ThrottleConfig *cfg)
+{
+ ThrottleTimers *tt = &bs->throttle_timers;
+ ThrottleState *ts = bs->throttle_state;
+ ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
+ qemu_mutex_lock(&tg->lock);
+ throttle_config(ts, tt, cfg);
+ /* throttle_config() cancels the timers */
+ tg->any_timer_armed[0] = tg->any_timer_armed[1] = false;
+ qemu_mutex_unlock(&tg->lock);
+}
+
+/* Get the throttle configuration from a particular group. Similar to
+ * throttle_get_config(), but guarantees atomicity within the
+ * throttling group.
+ *
+ * @bs: a BlockDriverState that is member of the group
+ * @cfg: the configuration will be written here
+ */
+void throttle_group_get_config(BlockDriverState *bs, ThrottleConfig *cfg)
+{
+ ThrottleState *ts = bs->throttle_state;
+ ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
+ qemu_mutex_lock(&tg->lock);
+ throttle_get_config(ts, cfg);
+ qemu_mutex_unlock(&tg->lock);
+}
+
+/* Register a BlockDriverState in the throttling group, also updating
+ * its throttle_state pointer to point to it. If a throttling group
+ * with that name does not exist yet, it will be created.
+ *
+ * @bs: the BlockDriverState to insert
+ * @groupname: the name of the group
+ */
+void throttle_group_register_bs(BlockDriverState *bs, const char *groupname)
+{
+ int i;
+ ThrottleGroup *tg = throttle_group_incref(groupname);
+
+ bs->throttle_state = &tg->ts;
+
+ qemu_mutex_lock(&tg->lock);
+ /* If the ThrottleGroup is new set this BlockDriverState as the token */
+ for (i = 0; i < 2; i++) {
+ if (!tg->tokens[i]) {
+ tg->tokens[i] = bs;
+ }
+ }
+
+ QLIST_INSERT_HEAD(&tg->head, bs, round_robin);
+ qemu_mutex_unlock(&tg->lock);
+}
+
+/* Unregister a BlockDriverState from its group, removing it from the
+ * list and setting the throttle_state pointer to NULL.
+ *
+ * The group will be destroyed if it's empty after this operation.
+ *
+ * @bs: the BlockDriverState to remove
+ */
+void throttle_group_unregister_bs(BlockDriverState *bs)
+{
+ ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+ int i;
+
+ qemu_mutex_lock(&tg->lock);
+ for (i = 0; i < 2; i++) {
+ if (tg->tokens[i] == bs) {
+ BlockDriverState *token = throttle_group_next_bs(bs);
+ /* Take care of the case where this is the last bs in the group */
+ if (token == bs) {
+ token = NULL;
+ }
+ tg->tokens[i] = token;
+ }
+ }
+
+ /* remove the current bs from the list */
+ QLIST_REMOVE(bs, round_robin);
+ qemu_mutex_unlock(&tg->lock);
+
+ throttle_group_unref(tg);
+ bs->throttle_state = NULL;
+}
+
+static void throttle_groups_init(void)
+{
+ qemu_mutex_init(&throttle_groups_lock);
+}
+
+block_init(throttle_groups_init);
diff --git a/include/block/block_int.h b/include/block/block_int.h
index d022164..16444a9 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -383,6 +383,7 @@ struct BlockDriverState {
ThrottleTimers throttle_timers;
CoQueue throttled_reqs[2];
bool io_limits_enabled;
+ QLIST_ENTRY(BlockDriverState) round_robin;
/* I/O stats (display with "info blockstats"). */
BlockAcctStats stats;
diff --git a/include/block/throttle-groups.h b/include/block/throttle-groups.h
new file mode 100644
index 0000000..b966ec7
--- /dev/null
+++ b/include/block/throttle-groups.h
@@ -0,0 +1,39 @@
+/*
+ * QEMU block throttling group infrastructure
+ *
+ * Copyright (C) Nodalink, EURL. 2014
+ * Copyright (C) Igalia, S.L. 2015
+ *
+ * Authors:
+ * Benoît Canet <benoit.canet@nodalink.com>
+ * Alberto Garcia <berto@igalia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef THROTTLE_GROUPS_H
+#define THROTTLE_GROUPS_H
+
+#include "qemu/throttle.h"
+#include "block/block_int.h"
+
+const char *throttle_group_get_name(BlockDriverState *bs);
+
+void throttle_group_config(BlockDriverState *bs, ThrottleConfig *cfg);
+void throttle_group_get_config(BlockDriverState *bs, ThrottleConfig *cfg);
+
+void throttle_group_register_bs(BlockDriverState *bs, const char *groupname);
+void throttle_group_unregister_bs(BlockDriverState *bs);
+
+#endif
--
2.4.2
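As a rough illustration, this is the calling pattern the new public API is
designed for; the helper name and the "disk-group" string are made up, and bs
is assumed to be a valid BlockDriverState (real callers only appear in the
following patches):

    #include <assert.h>
    #include <string.h>
    #include "block/throttle-groups.h"

    static void example_group_usage(BlockDriverState *bs, ThrottleConfig *cfg)
    {
        /* Joins "disk-group", creating it on first use (refcounted internally) */
        throttle_group_register_bs(bs, "disk-group");

        /* The name (and the pointer) stays constant for the group's lifetime */
        assert(!strcmp(throttle_group_get_name(bs), "disk-group"));

        /* Applies to every member of the group, atomically w.r.t. the group */
        throttle_group_config(bs, cfg);

        /* Leaves the group; the group goes away when its refcount hits zero */
        throttle_group_unregister_bs(bs);
    }

All locking happens inside these calls, so callers never touch the
ThrottleGroup structure directly.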
^ permalink raw reply related [flat|nested] 20+ messages in thread
* [Qemu-devel] [PULL 12/17] throttle: Add throttle group infrastructure tests
2015-06-05 11:57 [Qemu-devel] [PULL 00/17] Block patches Stefan Hajnoczi
` (10 preceding siblings ...)
2015-06-05 11:57 ` [Qemu-devel] [PULL 11/17] throttle: Add throttle group infrastructure Stefan Hajnoczi
@ 2015-06-05 11:57 ` Stefan Hajnoczi
2015-06-05 11:57 ` [Qemu-devel] [PULL 13/17] throttle: Add throttle group support Stefan Hajnoczi
` (5 subsequent siblings)
17 siblings, 0 replies; 20+ messages in thread
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Alberto Garcia, Stefan Hajnoczi
From: Alberto Garcia <berto@igalia.com>
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 863922756a1f0d94f697d3ed67bc7c1e4bac3eb0.1432037840.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
tests/test-throttle.c | 79 ++++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 69 insertions(+), 10 deletions(-)
diff --git a/tests/test-throttle.c b/tests/test-throttle.c
index 458f577..aa0e297 100644
--- a/tests/test-throttle.c
+++ b/tests/test-throttle.c
@@ -1,10 +1,12 @@
/*
* Throttle infrastructure tests
*
- * Copyright Nodalink, SARL. 2013
+ * Copyright Nodalink, EURL. 2013-2014
+ * Copyright Igalia, S.L. 2015
*
* Authors:
- * Benoît Canet <benoit.canet@irqsave.net>
+ * Benoît Canet <benoit.canet@nodalink.com>
+ * Alberto Garcia <berto@igalia.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2 or later.
* See the COPYING.LIB file in the top-level directory.
@@ -15,6 +17,7 @@
#include "block/aio.h"
#include "qemu/throttle.h"
#include "qemu/error-report.h"
+#include "block/throttle-groups.h"
static AioContext *ctx;
static LeakyBucket bkt;
@@ -500,23 +503,78 @@ static void test_accounting(void)
(64.0 / 13)));
}
+static void test_groups(void)
+{
+ ThrottleConfig cfg1, cfg2;
+ BlockDriverState *bdrv1, *bdrv2, *bdrv3;
+
+ bdrv1 = bdrv_new();
+ bdrv2 = bdrv_new();
+ bdrv3 = bdrv_new();
+
+ g_assert(bdrv1->throttle_state == NULL);
+ g_assert(bdrv2->throttle_state == NULL);
+ g_assert(bdrv3->throttle_state == NULL);
+
+ throttle_group_register_bs(bdrv1, "bar");
+ throttle_group_register_bs(bdrv2, "foo");
+ throttle_group_register_bs(bdrv3, "bar");
+
+ g_assert(bdrv1->throttle_state != NULL);
+ g_assert(bdrv2->throttle_state != NULL);
+ g_assert(bdrv3->throttle_state != NULL);
+
+ g_assert(!strcmp(throttle_group_get_name(bdrv1), "bar"));
+ g_assert(!strcmp(throttle_group_get_name(bdrv2), "foo"));
+ g_assert(bdrv1->throttle_state == bdrv3->throttle_state);
+
+ /* Setting the config of a group member affects the whole group */
+ memset(&cfg1, 0, sizeof(cfg1));
+ cfg1.buckets[THROTTLE_BPS_READ].avg = 500000;
+ cfg1.buckets[THROTTLE_BPS_WRITE].avg = 285000;
+ cfg1.buckets[THROTTLE_OPS_READ].avg = 20000;
+ cfg1.buckets[THROTTLE_OPS_WRITE].avg = 12000;
+ throttle_group_config(bdrv1, &cfg1);
+
+ throttle_group_get_config(bdrv1, &cfg1);
+ throttle_group_get_config(bdrv3, &cfg2);
+ g_assert(!memcmp(&cfg1, &cfg2, sizeof(cfg1)));
+
+ cfg2.buckets[THROTTLE_BPS_READ].avg = 4547;
+ cfg2.buckets[THROTTLE_BPS_WRITE].avg = 1349;
+ cfg2.buckets[THROTTLE_OPS_READ].avg = 123;
+ cfg2.buckets[THROTTLE_OPS_WRITE].avg = 86;
+ throttle_group_config(bdrv3, &cfg1);
+
+ throttle_group_get_config(bdrv1, &cfg1);
+ throttle_group_get_config(bdrv3, &cfg2);
+ g_assert(!memcmp(&cfg1, &cfg2, sizeof(cfg1)));
+
+ throttle_group_unregister_bs(bdrv1);
+ throttle_group_unregister_bs(bdrv2);
+ throttle_group_unregister_bs(bdrv3);
+
+ g_assert(bdrv1->throttle_state == NULL);
+ g_assert(bdrv2->throttle_state == NULL);
+ g_assert(bdrv3->throttle_state == NULL);
+}
+
int main(int argc, char **argv)
{
- GSource *src;
Error *local_error = NULL;
- init_clocks();
+ qemu_init_main_loop(&local_error);
+ ctx = qemu_get_aio_context();
- ctx = aio_context_new(&local_error);
if (!ctx) {
error_report("Failed to create AIO Context: '%s'",
- error_get_pretty(local_error));
- error_free(local_error);
+ local_error ? error_get_pretty(local_error) :
+ "Failed to initialize the QEMU main loop");
+ if (local_error) {
+ error_free(local_error);
+ }
exit(1);
}
- src = aio_get_g_source(ctx);
- g_source_attach(src, NULL);
- g_source_unref(src);
do {} while (g_main_context_iteration(NULL, false));
@@ -533,6 +591,7 @@ int main(int argc, char **argv)
g_test_add_func("/throttle/config/is_valid", test_is_valid);
g_test_add_func("/throttle/config_functions", test_config_functions);
g_test_add_func("/throttle/accounting", test_accounting);
+ g_test_add_func("/throttle/groups", test_groups);
return g_test_run();
}
--
2.4.2
^ permalink raw reply related [flat|nested] 20+ messages in thread
* [Qemu-devel] [PULL 13/17] throttle: Add throttle group support
2015-06-05 11:57 [Qemu-devel] [PULL 00/17] Block patches Stefan Hajnoczi
` (11 preceding siblings ...)
2015-06-05 11:57 ` [Qemu-devel] [PULL 12/17] throttle: Add throttle group infrastructure tests Stefan Hajnoczi
@ 2015-06-05 11:57 ` Stefan Hajnoczi
2015-06-05 11:57 ` [Qemu-devel] [PULL 14/17] throttle: acquire the ThrottleGroup lock in bdrv_swap() Stefan Hajnoczi
` (4 subsequent siblings)
17 siblings, 0 replies; 20+ messages in thread
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Alberto Garcia, Stefan Hajnoczi
From: Alberto Garcia <berto@igalia.com>
The throttle group support uses a cooperative round-robin scheduling
algorithm.
The principles of the algorithm are simple (a condensed sketch of the token
walk follows the list):
- Each BDS of the group is used as a token in a circular way.
- The active BDS computes if a wait must be done and arms the right
timer.
- If a wait must be done the token timer will be armed so the token
will become the next active BDS.
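Condensed from next_throttle_token() in the diff below (only the helper name
and the explicit tg parameter are ours), the token walk looks like this:

    static BlockDriverState *pick_token(ThrottleGroup *tg, BlockDriverState *bs,
                                        bool is_write)
    {
        BlockDriverState *start = tg->tokens[is_write];
        BlockDriverState *token = throttle_group_next_bs(start);

        /* walk the circular round-robin list looking for pending requests */
        while (token != start && !token->pending_reqs[is_write]) {
            token = throttle_group_next_bs(token);
        }

        /* nobody else has queued I/O, so the current bs keeps the token */
        if (token == start && !token->pending_reqs[is_write]) {
            token = bs;
        }

        return token;
    }

If the chosen token must wait, the timer is armed on it and it becomes the
next active BDS, as throttle_group_schedule_timer() and
schedule_next_request() in the diff show.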
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 7d0ca2d3615ca3bb156d4c31aa64e6812cda7a6e.1432037840.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block.c | 15 +--
block/io.c | 75 +++-----------
block/qapi.c | 5 +-
block/throttle-groups.c | 214 +++++++++++++++++++++++++++++++++++++++-
blockdev.c | 38 +++++--
hmp.c | 4 +-
include/block/block.h | 3 +-
include/block/block_int.h | 7 +-
include/block/throttle-groups.h | 4 +
qapi/block-core.json | 25 ++++-
qemu-options.hx | 1 +
qmp-commands.hx | 3 +-
12 files changed, 309 insertions(+), 85 deletions(-)
diff --git a/block.c b/block.c
index 7a02db9..fb993f2 100644
--- a/block.c
+++ b/block.c
@@ -1822,15 +1822,18 @@ static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
bs_dest->enable_write_cache = bs_src->enable_write_cache;
/* i/o throttled req */
- memcpy(&bs_dest->throttle_state,
- &bs_src->throttle_state,
- sizeof(ThrottleState));
+ bs_dest->throttle_state = bs_src->throttle_state;
+ bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
+ bs_dest->pending_reqs[0] = bs_src->pending_reqs[0];
+ bs_dest->pending_reqs[1] = bs_src->pending_reqs[1];
+ bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
+ bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
+ memcpy(&bs_dest->round_robin,
+ &bs_src->round_robin,
+ sizeof(bs_dest->round_robin));
memcpy(&bs_dest->throttle_timers,
&bs_src->throttle_timers,
sizeof(ThrottleTimers));
- bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
- bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
- bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
/* r/w error */
bs_dest->on_read_error = bs_src->on_read_error;
diff --git a/block/io.c b/block/io.c
index ce5b4ec..c1c224f 100644
--- a/block/io.c
+++ b/block/io.c
@@ -23,9 +23,9 @@
*/
#include "trace.h"
-#include "sysemu/qtest.h"
#include "block/blockjob.h"
#include "block/block_int.h"
+#include "block/throttle-groups.h"
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
@@ -65,7 +65,7 @@ void bdrv_set_io_limits(BlockDriverState *bs,
{
int i;
- throttle_config(&bs->throttle_state, &bs->throttle_timers, cfg);
+ throttle_group_config(bs, cfg);
for (i = 0; i < 2; i++) {
qemu_co_enter_next(&bs->throttled_reqs[i]);
@@ -95,76 +95,33 @@ static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
void bdrv_io_limits_disable(BlockDriverState *bs)
{
bs->io_limits_enabled = false;
-
bdrv_start_throttled_reqs(bs);
-
- throttle_timers_destroy(&bs->throttle_timers);
-}
-
-static void bdrv_throttle_read_timer_cb(void *opaque)
-{
- BlockDriverState *bs = opaque;
- qemu_co_enter_next(&bs->throttled_reqs[0]);
-}
-
-static void bdrv_throttle_write_timer_cb(void *opaque)
-{
- BlockDriverState *bs = opaque;
- qemu_co_enter_next(&bs->throttled_reqs[1]);
+ throttle_group_unregister_bs(bs);
}
/* should be called before bdrv_set_io_limits if a limit is set */
-void bdrv_io_limits_enable(BlockDriverState *bs)
+void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
{
- int clock_type = QEMU_CLOCK_REALTIME;
-
- if (qtest_enabled()) {
- /* For testing block IO throttling only */
- clock_type = QEMU_CLOCK_VIRTUAL;
- }
assert(!bs->io_limits_enabled);
- throttle_init(&bs->throttle_state);
- throttle_timers_init(&bs->throttle_timers,
- bdrv_get_aio_context(bs),
- clock_type,
- bdrv_throttle_read_timer_cb,
- bdrv_throttle_write_timer_cb,
- bs);
+ throttle_group_register_bs(bs, group);
bs->io_limits_enabled = true;
}
-/* This function makes an IO wait if needed
- *
- * @nb_sectors: the number of sectors of the IO
- * @is_write: is the IO a write
- */
-static void bdrv_io_limits_intercept(BlockDriverState *bs,
- unsigned int bytes,
- bool is_write)
+void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
{
- /* does this io must wait */
- bool must_wait = throttle_schedule_timer(&bs->throttle_state,
- &bs->throttle_timers,
- is_write);
-
- /* if must wait or any request of this type throttled queue the IO */
- if (must_wait ||
- !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
- qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
+ /* this bs is not part of any group */
+ if (!bs->throttle_state) {
+ return;
}
- /* the IO will be executed, do the accounting */
- throttle_account(&bs->throttle_state, is_write, bytes);
-
-
- /* if the next request must wait -> do nothing */
- if (throttle_schedule_timer(&bs->throttle_state, &bs->throttle_timers,
- is_write)) {
+ /* this bs is a part of the same group as the one we want */
+ if (!g_strcmp0(throttle_group_get_name(bs), group)) {
return;
}
- /* else queue next request for execution */
- qemu_co_queue_next(&bs->throttled_reqs[is_write]);
+ /* need to change the group this bs belongs to */
+ bdrv_io_limits_disable(bs);
+ bdrv_io_limits_enable(bs, group);
}
void bdrv_setup_io_funcs(BlockDriver *bdrv)
@@ -971,7 +928,7 @@ static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
/* throttling disk I/O */
if (bs->io_limits_enabled) {
- bdrv_io_limits_intercept(bs, bytes, false);
+ throttle_group_co_io_limits_intercept(bs, bytes, false);
}
/* Align read if necessary by padding qiov */
@@ -1301,7 +1258,7 @@ static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
/* throttling disk I/O */
if (bs->io_limits_enabled) {
- bdrv_io_limits_intercept(bs, bytes, true);
+ throttle_group_co_io_limits_intercept(bs, bytes, true);
}
/*
diff --git a/block/qapi.c b/block/qapi.c
index 18d2b95..a5ac312 100644
--- a/block/qapi.c
+++ b/block/qapi.c
@@ -24,6 +24,7 @@
#include "block/qapi.h"
#include "block/block_int.h"
+#include "block/throttle-groups.h"
#include "block/write-threshold.h"
#include "qmp-commands.h"
#include "qapi-visit.h"
@@ -65,7 +66,9 @@ BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs, Error **errp)
if (bs->io_limits_enabled) {
ThrottleConfig cfg;
- throttle_get_config(&bs->throttle_state, &cfg);
+
+ throttle_group_get_config(bs, &cfg);
+
info->bps = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg;
info->bps_wr = cfg.buckets[THROTTLE_BPS_WRITE].avg;
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 352077f..da8c70c 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -23,6 +23,9 @@
*/
#include "block/throttle-groups.h"
+#include "qemu/queue.h"
+#include "qemu/thread.h"
+#include "sysemu/qtest.h"
/* The ThrottleGroup structure (with its ThrottleState) is shared
* among different BlockDriverState and it's independent from
@@ -160,6 +163,153 @@ static BlockDriverState *throttle_group_next_bs(BlockDriverState *bs)
return next;
}
+/* Return the next BlockDriverState in the round-robin sequence with
+ * pending I/O requests.
+ *
+ * This assumes that tg->lock is held.
+ *
+ * @bs: the current BlockDriverState
+ * @is_write: the type of operation (read/write)
+ * @ret: the next BlockDriverState with pending requests, or bs
+ * if there is none.
+ */
+static BlockDriverState *next_throttle_token(BlockDriverState *bs,
+ bool is_write)
+{
+ ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+ BlockDriverState *token, *start;
+
+ start = token = tg->tokens[is_write];
+
+ /* get next bs round in round robin style */
+ token = throttle_group_next_bs(token);
+ while (token != start && !token->pending_reqs[is_write]) {
+ token = throttle_group_next_bs(token);
+ }
+
+ /* If no IO is queued for scheduling on the next round robin token
+ * then make the current bs the token, because chances are that the
+ * current bs will get the current request queued.
+ */
+ if (token == start && !token->pending_reqs[is_write]) {
+ token = bs;
+ }
+
+ return token;
+}
+
+/* Check if the next I/O request for a BlockDriverState needs to be
+ * throttled or not. If there's no timer set in this group, set one
+ * and update the token accordingly.
+ *
+ * This assumes that tg->lock is held.
+ *
+ * @bs: the current BlockDriverState
+ * @is_write: the type of operation (read/write)
+ * @ret: whether the I/O request needs to be throttled or not
+ */
+static bool throttle_group_schedule_timer(BlockDriverState *bs,
+ bool is_write)
+{
+ ThrottleState *ts = bs->throttle_state;
+ ThrottleTimers *tt = &bs->throttle_timers;
+ ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
+ bool must_wait;
+
+ /* Check if any of the timers in this group is already armed */
+ if (tg->any_timer_armed[is_write]) {
+ return true;
+ }
+
+ must_wait = throttle_schedule_timer(ts, tt, is_write);
+
+ /* If a timer just got armed, set bs as the current token */
+ if (must_wait) {
+ tg->tokens[is_write] = bs;
+ tg->any_timer_armed[is_write] = true;
+ }
+
+ return must_wait;
+}
+
+/* Look for the next pending I/O request and schedule it.
+ *
+ * This assumes that tg->lock is held.
+ *
+ * @bs: the current BlockDriverState
+ * @is_write: the type of operation (read/write)
+ */
+static void schedule_next_request(BlockDriverState *bs, bool is_write)
+{
+ ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+ bool must_wait;
+ BlockDriverState *token;
+
+ /* Check if there's any pending request to schedule next */
+ token = next_throttle_token(bs, is_write);
+ if (!token->pending_reqs[is_write]) {
+ return;
+ }
+
+ /* Set a timer for the request if it needs to be throttled */
+ must_wait = throttle_group_schedule_timer(token, is_write);
+
+ /* If it doesn't have to wait, queue it for immediate execution */
+ if (!must_wait) {
+ /* Give preference to requests from the current bs */
+ if (qemu_in_coroutine() &&
+ qemu_co_queue_next(&bs->throttled_reqs[is_write])) {
+ token = bs;
+ } else {
+ ThrottleTimers *tt = &token->throttle_timers;
+ int64_t now = qemu_clock_get_ns(tt->clock_type);
+ timer_mod(tt->timers[is_write], now + 1);
+ tg->any_timer_armed[is_write] = true;
+ }
+ tg->tokens[is_write] = token;
+ }
+}
+
+/* Check if an I/O request needs to be throttled, wait and set a timer
+ * if necessary, and schedule the next request using a round robin
+ * algorithm.
+ *
+ * @bs: the current BlockDriverState
+ * @bytes: the number of bytes for this I/O
+ * @is_write: the type of operation (read/write)
+ */
+void coroutine_fn throttle_group_co_io_limits_intercept(BlockDriverState *bs,
+ unsigned int bytes,
+ bool is_write)
+{
+ bool must_wait;
+ BlockDriverState *token;
+
+ ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+ qemu_mutex_lock(&tg->lock);
+
+ /* First we check if this I/O has to be throttled. */
+ token = next_throttle_token(bs, is_write);
+ must_wait = throttle_group_schedule_timer(token, is_write);
+
+ /* Wait if there's a timer set or queued requests of this type */
+ if (must_wait || bs->pending_reqs[is_write]) {
+ bs->pending_reqs[is_write]++;
+ qemu_mutex_unlock(&tg->lock);
+ qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
+ qemu_mutex_lock(&tg->lock);
+ bs->pending_reqs[is_write]--;
+ }
+
+ /* The I/O will be executed, so do the accounting */
+ throttle_account(bs->throttle_state, is_write, bytes);
+
+ /* Schedule the next request */
+ schedule_next_request(bs, is_write);
+
+ qemu_mutex_unlock(&tg->lock);
+}
+
/* Update the throttle configuration for a particular group. Similar
* to throttle_config(), but guarantees atomicity within the
* throttling group.
@@ -195,9 +345,49 @@ void throttle_group_get_config(BlockDriverState *bs, ThrottleConfig *cfg)
qemu_mutex_unlock(&tg->lock);
}
-/* Register a BlockDriverState in the throttling group, also updating
- * its throttle_state pointer to point to it. If a throttling group
- * with that name does not exist yet, it will be created.
+/* ThrottleTimers callback. This wakes up a request that was waiting
+ * because it had been throttled.
+ *
+ * @bs: the BlockDriverState whose request had been throttled
+ * @is_write: the type of operation (read/write)
+ */
+static void timer_cb(BlockDriverState *bs, bool is_write)
+{
+ ThrottleState *ts = bs->throttle_state;
+ ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
+ bool empty_queue;
+
+ /* The timer has just been fired, so we can update the flag */
+ qemu_mutex_lock(&tg->lock);
+ tg->any_timer_armed[is_write] = false;
+ qemu_mutex_unlock(&tg->lock);
+
+ /* Run the request that was waiting for this timer */
+ empty_queue = !qemu_co_enter_next(&bs->throttled_reqs[is_write]);
+
+ /* If the request queue was empty then we have to take care of
+ * scheduling the next one */
+ if (empty_queue) {
+ qemu_mutex_lock(&tg->lock);
+ schedule_next_request(bs, is_write);
+ qemu_mutex_unlock(&tg->lock);
+ }
+}
+
+static void read_timer_cb(void *opaque)
+{
+ timer_cb(opaque, false);
+}
+
+static void write_timer_cb(void *opaque)
+{
+ timer_cb(opaque, true);
+}
+
+/* Register a BlockDriverState in the throttling group, also
+ * initializing its timers and updating its throttle_state pointer to
+ * point to it. If a throttling group with that name does not exist
+ * yet, it will be created.
*
* @bs: the BlockDriverState to insert
* @groupname: the name of the group
@@ -206,6 +396,12 @@ void throttle_group_register_bs(BlockDriverState *bs, const char *groupname)
{
int i;
ThrottleGroup *tg = throttle_group_incref(groupname);
+ int clock_type = QEMU_CLOCK_REALTIME;
+
+ if (qtest_enabled()) {
+ /* For testing block IO throttling only */
+ clock_type = QEMU_CLOCK_VIRTUAL;
+ }
bs->throttle_state = &tg->ts;
@@ -218,11 +414,20 @@ void throttle_group_register_bs(BlockDriverState *bs, const char *groupname)
}
QLIST_INSERT_HEAD(&tg->head, bs, round_robin);
+
+ throttle_timers_init(&bs->throttle_timers,
+ bdrv_get_aio_context(bs),
+ clock_type,
+ read_timer_cb,
+ write_timer_cb,
+ bs);
+
qemu_mutex_unlock(&tg->lock);
}
/* Unregister a BlockDriverState from its group, removing it from the
- * list and setting the throttle_state pointer to NULL.
+ * list, destroying the timers and setting the throttle_state pointer
+ * to NULL.
*
* The group will be destroyed if it's empty after this operation.
*
@@ -247,6 +452,7 @@ void throttle_group_unregister_bs(BlockDriverState *bs)
/* remove the current bs from the list */
QLIST_REMOVE(bs, round_robin);
+ throttle_timers_destroy(&bs->throttle_timers);
qemu_mutex_unlock(&tg->lock);
throttle_group_unref(tg);
diff --git a/blockdev.c b/blockdev.c
index 6a45555..38a8932 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -34,6 +34,7 @@
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "block/blockjob.h"
+#include "block/throttle-groups.h"
#include "monitor/monitor.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
@@ -357,6 +358,7 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
const char *id;
bool has_driver_specific_opts;
BlockdevDetectZeroesOptions detect_zeroes;
+ const char *throttling_group;
/* Check common options by copying from bs_opts to opts, all other options
* stay in bs_opts for processing by bdrv_open(). */
@@ -459,6 +461,8 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
cfg.op_size = qemu_opt_get_number(opts, "throttling.iops-size", 0);
+ throttling_group = qemu_opt_get(opts, "throttling.group");
+
if (!check_throttle_config(&cfg, &error)) {
error_propagate(errp, error);
goto early_err;
@@ -547,7 +551,10 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
/* disk I/O throttling */
if (throttle_enabled(&cfg)) {
- bdrv_io_limits_enable(bs);
+ if (!throttling_group) {
+ throttling_group = blk_name(blk);
+ }
+ bdrv_io_limits_enable(bs, throttling_group);
bdrv_set_io_limits(bs, &cfg);
}
@@ -711,6 +718,8 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type)
{ "iops_size", "throttling.iops-size" },
+ { "group", "throttling.group" },
+
{ "readonly", "read-only" },
};
@@ -1951,7 +1960,9 @@ void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd,
bool has_iops_wr_max,
int64_t iops_wr_max,
bool has_iops_size,
- int64_t iops_size, Error **errp)
+ int64_t iops_size,
+ bool has_group,
+ const char *group, Error **errp)
{
ThrottleConfig cfg;
BlockDriverState *bs;
@@ -2004,16 +2015,21 @@ void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd,
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
- if (!bs->io_limits_enabled && throttle_enabled(&cfg)) {
- bdrv_io_limits_enable(bs);
- } else if (bs->io_limits_enabled && !throttle_enabled(&cfg)) {
+ if (throttle_enabled(&cfg)) {
+ /* Enable I/O limits if they're not enabled yet, otherwise
+ * just update the throttling group. */
+ if (!bs->io_limits_enabled) {
+ bdrv_io_limits_enable(bs, has_group ? group : device);
+ } else if (has_group) {
+ bdrv_io_limits_update_group(bs, group);
+ }
+ /* Set the new throttling configuration */
+ bdrv_set_io_limits(bs, &cfg);
+ } else if (bs->io_limits_enabled) {
+ /* If all throttling settings are set to 0, disable I/O limits */
bdrv_io_limits_disable(bs);
}
- if (bs->io_limits_enabled) {
- bdrv_set_io_limits(bs, &cfg);
- }
-
aio_context_release(aio_context);
}
@@ -3194,6 +3210,10 @@ QemuOptsList qemu_common_drive_opts = {
.type = QEMU_OPT_NUMBER,
.help = "when limiting by iops max size of an I/O in bytes",
},{
+ .name = "throttling.group",
+ .type = QEMU_OPT_STRING,
+ .help = "name of the block throttling group",
+ },{
.name = "copy-on-read",
.type = QEMU_OPT_BOOL,
.help = "copy read data from backing file into image file",
diff --git a/hmp.c b/hmp.c
index d5b9ebb..9c5c73e 100644
--- a/hmp.c
+++ b/hmp.c
@@ -1356,7 +1356,9 @@ void hmp_block_set_io_throttle(Monitor *mon, const QDict *qdict)
false,
0,
false, /* No default I/O size */
- 0, &err);
+ 0,
+ false,
+ NULL, &err);
hmp_handle_error(mon, &err);
}
diff --git a/include/block/block.h b/include/block/block.h
index 4d9b555..9477b8f 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -173,8 +173,9 @@ void bdrv_stats_print(Monitor *mon, const QObject *data);
void bdrv_info_stats(Monitor *mon, QObject **ret_data);
/* disk I/O throttling */
-void bdrv_io_limits_enable(BlockDriverState *bs);
+void bdrv_io_limits_enable(BlockDriverState *bs, const char *group);
void bdrv_io_limits_disable(BlockDriverState *bs);
+void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group);
void bdrv_init(void);
void bdrv_init_with_whitelist(void);
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 16444a9..8cd93f2 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -379,10 +379,13 @@ struct BlockDriverState {
unsigned int serialising_in_flight;
/* I/O throttling */
- ThrottleState throttle_state;
- ThrottleTimers throttle_timers;
CoQueue throttled_reqs[2];
bool io_limits_enabled;
+ /* The following fields are protected by the ThrottleGroup lock.
+ * See the ThrottleGroup documentation for details. */
+ ThrottleState *throttle_state;
+ ThrottleTimers throttle_timers;
+ unsigned pending_reqs[2];
QLIST_ENTRY(BlockDriverState) round_robin;
/* I/O stats (display with "info blockstats"). */
diff --git a/include/block/throttle-groups.h b/include/block/throttle-groups.h
index b966ec7..322139a 100644
--- a/include/block/throttle-groups.h
+++ b/include/block/throttle-groups.h
@@ -36,4 +36,8 @@ void throttle_group_get_config(BlockDriverState *bs, ThrottleConfig *cfg);
void throttle_group_register_bs(BlockDriverState *bs, const char *groupname);
void throttle_group_unregister_bs(BlockDriverState *bs);
+void coroutine_fn throttle_group_co_io_limits_intercept(BlockDriverState *bs,
+ unsigned int bytes,
+ bool is_write);
+
#endif
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 71ed838..07f476d 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -1068,6 +1068,27 @@
#
# Change I/O throttle limits for a block drive.
#
+# Since QEMU 2.4, each device with I/O limits is a member of a throttle
+# group.
+#
+# If two or more devices are members of the same group, the limits
+# will apply to the combined I/O of the whole group in a round-robin
+# fashion. Therefore, setting new I/O limits to a device will affect
+# the whole group.
+#
+# The name of the group can be specified using the 'group' parameter.
+# If the parameter is unset, it is assumed to be the current group of
+# that device. If it's not in any group yet, the name of the device
+# will be used as the name for its group.
+#
+# The 'group' parameter can also be used to move a device to a
+# different group. In this case the limits specified in the parameters
+# will be applied to the new group only.
+#
+# I/O limits can be disabled by setting all of them to 0. In this case
+# the device will be removed from its group and the rest of its
+# members will not be affected. The 'group' parameter is ignored.
+#
# @device: The name of the device
#
# @bps: total throughput limit in bytes per second
@@ -1096,6 +1117,8 @@
#
# @iops_size: #optional an I/O size in bytes (Since 1.7)
#
+# @group: #optional throttle group name (Since 2.4)
+#
# Returns: Nothing on success
# If @device is not a valid block device, DeviceNotFound
#
@@ -1107,7 +1130,7 @@
'*bps_max': 'int', '*bps_rd_max': 'int',
'*bps_wr_max': 'int', '*iops_max': 'int',
'*iops_rd_max': 'int', '*iops_wr_max': 'int',
- '*iops_size': 'int' } }
+ '*iops_size': 'int', '*group': 'str' } }
##
# @block-stream:
diff --git a/qemu-options.hx b/qemu-options.hx
index b3db6cb..090b5e9 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -464,6 +464,7 @@ DEF("drive", HAS_ARG, QEMU_OPTION_drive,
" [[,bps_max=bm]|[[,bps_rd_max=rm][,bps_wr_max=wm]]]\n"
" [[,iops_max=im]|[[,iops_rd_max=irm][,iops_wr_max=iwm]]]\n"
" [[,iops_size=is]]\n"
+ " [[,group=g]]\n"
" use 'file' as a drive image\n", QEMU_ARCH_ALL)
STEXI
@item -drive @var{option}[,@var{option}[,@var{option}[,...]]]
diff --git a/qmp-commands.hx b/qmp-commands.hx
index 3b33496..38bd16d 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -1856,7 +1856,7 @@ EQMP
{
.name = "block_set_io_throttle",
- .args_type = "device:B,bps:l,bps_rd:l,bps_wr:l,iops:l,iops_rd:l,iops_wr:l,bps_max:l?,bps_rd_max:l?,bps_wr_max:l?,iops_max:l?,iops_rd_max:l?,iops_wr_max:l?,iops_size:l?",
+ .args_type = "device:B,bps:l,bps_rd:l,bps_wr:l,iops:l,iops_rd:l,iops_wr:l,bps_max:l?,bps_rd_max:l?,bps_wr_max:l?,iops_max:l?,iops_rd_max:l?,iops_wr_max:l?,iops_size:l?,group:s?",
.mhandler.cmd_new = qmp_marshal_input_block_set_io_throttle,
},
@@ -1882,6 +1882,7 @@ Arguments:
- "iops_rd_max": read I/O operations max (json-int)
- "iops_wr_max": write I/O operations max (json-int)
- "iops_size": I/O size in bytes when limiting (json-int)
+- "group": throttle group name (json-string)
Example:
--
2.4.2
^ permalink raw reply related [flat|nested] 20+ messages in thread
* [Qemu-devel] [PULL 14/17] throttle: acquire the ThrottleGroup lock in bdrv_swap()
2015-06-05 11:57 [Qemu-devel] [PULL 00/17] Block patches Stefan Hajnoczi
` (12 preceding siblings ...)
2015-06-05 11:57 ` [Qemu-devel] [PULL 13/17] throttle: Add throttle group support Stefan Hajnoczi
@ 2015-06-05 11:57 ` Stefan Hajnoczi
2015-06-05 11:57 ` [Qemu-devel] [PULL 15/17] throttle: add the name of the ThrottleGroup to BlockDeviceInfo Stefan Hajnoczi
` (3 subsequent siblings)
17 siblings, 0 replies; 20+ messages in thread
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Alberto Garcia, Stefan Hajnoczi
From: Alberto Garcia <berto@igalia.com>
bdrv_swap() touches the fields of a BlockDriverState that are
protected by the ThrottleGroup lock. Although those fields end up in
their original place, they are temporarily swapped in the process,
so there's a chance that an operation on a member of the same group
happening on a different thread can try to use them.
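In outline, the guard added below is (a hypothetical helper wrapping the
pattern from the block.c hunk; only group members need, or can take, the
group lock):

    #include "block/throttle-groups.h"

    static void example_swap_guard(BlockDriverState *bs_old)
    {
        if (bs_old->throttle_state) {
            throttle_group_lock(bs_old);
        }

        /* ... swap the BlockDriverState contents here, including the
         * throttling fields protected by the group lock ... */

        if (bs_old->throttle_state) {
            throttle_group_unlock(bs_old);
        }
    }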
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 59ff27e9afb45a9f694bb0fa6a86b80494fa0bc8.1432037840.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block.c | 16 ++++++++++++++++
block/throttle-groups.c | 31 ++++++++++++++++++++++++++++++-
include/block/throttle-groups.h | 3 +++
3 files changed, 49 insertions(+), 1 deletion(-)
diff --git a/block.c b/block.c
index fb993f2..53918cf 100644
--- a/block.c
+++ b/block.c
@@ -36,6 +36,7 @@
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"
+#include "block/throttle-groups.h"
#ifdef CONFIG_BSD
#include <sys/types.h>
@@ -1887,11 +1888,20 @@ void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
}
+ /* If the BlockDriverState is part of a throttling group acquire
+ * its lock since we're going to mess with the protected fields.
+ * Otherwise there's no need to worry since no one else can touch
+ * them. */
+ if (bs_old->throttle_state) {
+ throttle_group_lock(bs_old);
+ }
+
/* bs_new must be unattached and shouldn't have anything fancy enabled */
assert(!bs_new->blk);
assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
assert(bs_new->job == NULL);
assert(bs_new->io_limits_enabled == false);
+ assert(bs_new->throttle_state == NULL);
assert(!throttle_timers_are_initialized(&bs_new->throttle_timers));
tmp = *bs_new;
@@ -1909,8 +1919,14 @@ void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
/* Check a few fields that should remain attached to the device */
assert(bs_new->job == NULL);
assert(bs_new->io_limits_enabled == false);
+ assert(bs_new->throttle_state == NULL);
assert(!throttle_timers_are_initialized(&bs_new->throttle_timers));
+ /* Release the ThrottleGroup lock */
+ if (bs_old->throttle_state) {
+ throttle_group_unlock(bs_old);
+ }
+
/* insert the nodes back into the graph node list if needed */
if (bs_new->node_name[0] != '\0') {
QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index da8c70c..efc462f 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -33,7 +33,8 @@
* its own locking.
*
* This locking is however handled internally in this file, so it's
- * transparent to outside users.
+ * mostly transparent to outside users (but see the documentation in
+ * throttle_groups_lock()).
*
* The whole ThrottleGroup structure is private and invisible to
* outside users, that only use it through its ThrottleState.
@@ -459,6 +460,34 @@ void throttle_group_unregister_bs(BlockDriverState *bs)
bs->throttle_state = NULL;
}
+/* Acquire the lock of this throttling group.
+ *
+ * You won't normally need to use this. None of the functions from the
+ * ThrottleGroup API require you to acquire the lock since all of them
+ * deal with it internally.
+ *
+ * This should only be used in exceptional cases when you want to
+ * access the protected fields of a BlockDriverState directly
+ * (e.g. bdrv_swap()).
+ *
+ * @bs: a BlockDriverState that is member of the group
+ */
+void throttle_group_lock(BlockDriverState *bs)
+{
+ ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+ qemu_mutex_lock(&tg->lock);
+}
+
+/* Release the lock of this throttling group.
+ *
+ * See the comments in throttle_group_lock().
+ */
+void throttle_group_unlock(BlockDriverState *bs)
+{
+ ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+ qemu_mutex_unlock(&tg->lock);
+}
+
static void throttle_groups_init(void)
{
qemu_mutex_init(&throttle_groups_lock);
diff --git a/include/block/throttle-groups.h b/include/block/throttle-groups.h
index 322139a..fab113f 100644
--- a/include/block/throttle-groups.h
+++ b/include/block/throttle-groups.h
@@ -40,4 +40,7 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockDriverState *bs,
unsigned int bytes,
bool is_write);
+void throttle_group_lock(BlockDriverState *bs);
+void throttle_group_unlock(BlockDriverState *bs);
+
#endif
--
2.4.2
^ permalink raw reply related [flat|nested] 20+ messages in thread
* [Qemu-devel] [PULL 15/17] throttle: add the name of the ThrottleGroup to BlockDeviceInfo
2015-06-05 11:57 [Qemu-devel] [PULL 00/17] Block patches Stefan Hajnoczi
` (13 preceding siblings ...)
2015-06-05 11:57 ` [Qemu-devel] [PULL 14/17] throttle: acquire the ThrottleGroup lock in bdrv_swap() Stefan Hajnoczi
@ 2015-06-05 11:57 ` Stefan Hajnoczi
2015-06-05 11:57 ` [Qemu-devel] [PULL 16/17] throttle: Update throttle infrastructure copyright Stefan Hajnoczi
` (2 subsequent siblings)
17 siblings, 0 replies; 20+ messages in thread
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Alberto Garcia, Stefan Hajnoczi
From: Alberto Garcia <berto@igalia.com>
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: ff3b3bab499b8d9d6e1ce1dbddf84334571aaab7.1432037840.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block/qapi.c | 3 +++
hmp.c | 6 ++++--
qapi/block-core.json | 4 +++-
3 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/block/qapi.c b/block/qapi.c
index a5ac312..a738148 100644
--- a/block/qapi.c
+++ b/block/qapi.c
@@ -93,6 +93,9 @@ BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs, Error **errp)
info->has_iops_size = cfg.op_size;
info->iops_size = cfg.op_size;
+
+ info->has_group = true;
+ info->group = g_strdup(throttle_group_get_name(bs));
}
info->write_threshold = bdrv_write_threshold_get(bs);
diff --git a/hmp.c b/hmp.c
index 9c5c73e..810bf22 100644
--- a/hmp.c
+++ b/hmp.c
@@ -399,7 +399,8 @@ static void print_block_info(Monitor *mon, BlockInfo *info,
" iops_max=%" PRId64
" iops_rd_max=%" PRId64
" iops_wr_max=%" PRId64
- " iops_size=%" PRId64 "\n",
+ " iops_size=%" PRId64
+ " group=%s\n",
inserted->bps,
inserted->bps_rd,
inserted->bps_wr,
@@ -412,7 +413,8 @@ static void print_block_info(Monitor *mon, BlockInfo *info,
inserted->iops_max,
inserted->iops_rd_max,
inserted->iops_wr_max,
- inserted->iops_size);
+ inserted->iops_size,
+ inserted->group);
}
if (verbose) {
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 07f476d..a597035 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -259,6 +259,8 @@
#
# @iops_size: #optional an I/O size in bytes (Since 1.7)
#
+# @group: #optional throttle group name (Since 2.4)
+#
# @cache: the cache mode used for the block device (since: 2.3)
#
# @write_threshold: configured write threshold for the device.
@@ -278,7 +280,7 @@
'*bps_max': 'int', '*bps_rd_max': 'int',
'*bps_wr_max': 'int', '*iops_max': 'int',
'*iops_rd_max': 'int', '*iops_wr_max': 'int',
- '*iops_size': 'int', 'cache': 'BlockdevCacheInfo',
+ '*iops_size': 'int', '*group': 'str', 'cache': 'BlockdevCacheInfo',
'write_threshold': 'int' } }
##
--
2.4.2
^ permalink raw reply related [flat|nested] 20+ messages in thread
* [Qemu-devel] [PULL 16/17] throttle: Update throttle infrastructure copyright
2015-06-05 11:57 [Qemu-devel] [PULL 00/17] Block patches Stefan Hajnoczi
` (14 preceding siblings ...)
2015-06-05 11:57 ` [Qemu-devel] [PULL 15/17] throttle: add the name of the ThrottleGroup to BlockDeviceInfo Stefan Hajnoczi
@ 2015-06-05 11:57 ` Stefan Hajnoczi
2015-06-05 11:57 ` [Qemu-devel] [PULL 17/17] qemu-iotests: expand test 093 to support group throttling Stefan Hajnoczi
2015-06-05 13:53 ` [Qemu-devel] [PULL 00/17] Block patches Peter Maydell
17 siblings, 0 replies; 20+ messages in thread
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Alberto Garcia, Stefan Hajnoczi
From: Alberto Garcia <berto@igalia.com>
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 44ae278a9fb498be955774a545009a854b404392.1432037840.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
include/qemu/throttle.h | 8 +++++---
util/throttle.c | 8 +++++---
2 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
index 2c560db..5af76f0 100644
--- a/include/qemu/throttle.h
+++ b/include/qemu/throttle.h
@@ -1,10 +1,12 @@
/*
* QEMU throttling infrastructure
*
- * Copyright (C) Nodalink, SARL. 2013
+ * Copyright (C) Nodalink, EURL. 2013-2014
+ * Copyright (C) Igalia, S.L. 2015
*
- * Author:
- * Benoît Canet <benoit.canet@irqsave.net>
+ * Authors:
+ * Benoît Canet <benoit.canet@nodalink.com>
+ * Alberto Garcia <berto@igalia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/util/throttle.c b/util/throttle.c
index d76a48e..706c131 100644
--- a/util/throttle.c
+++ b/util/throttle.c
@@ -1,10 +1,12 @@
/*
* QEMU throttling infrastructure
*
- * Copyright (C) Nodalink, SARL. 2013
+ * Copyright (C) Nodalink, EURL. 2013-2014
+ * Copyright (C) Igalia, S.L. 2015
*
- * Author:
- * Benoît Canet <benoit.canet@irqsave.net>
+ * Authors:
+ * Benoît Canet <benoit.canet@nodalink.com>
+ * Alberto Garcia <berto@igalia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
--
2.4.2
* [Qemu-devel] [PULL 17/17] qemu-iotests: expand test 093 to support group throttling
2015-06-05 11:57 [Qemu-devel] [PULL 00/17] Block patches Stefan Hajnoczi
` (15 preceding siblings ...)
2015-06-05 11:57 ` [Qemu-devel] [PULL 16/17] throttle: Update throttle infrastructure copyright Stefan Hajnoczi
@ 2015-06-05 11:57 ` Stefan Hajnoczi
2015-06-05 13:53 ` [Qemu-devel] [PULL 00/17] Block patches Peter Maydell
17 siblings, 0 replies; 20+ messages in thread
From: Stefan Hajnoczi @ 2015-06-05 11:57 UTC (permalink / raw)
To: qemu-devel; +Cc: Kevin Wolf, Peter Maydell, Alberto Garcia, Stefan Hajnoczi
From: Alberto Garcia <berto@igalia.com>
This patch improves the test by attaching a varying number of drives
to the VM and putting them all in the same throttling group. The test
verifies that the I/O is evenly distributed among all members of the
group and that the limits are enforced.
By default the test is repeated three times, with 1, 2 and 3 drives,
but the maximum number of simultaneous drives is configurable.
Signed-off-by: Alberto Garcia <berto@igalia.com>
Message-id: d0fdc6978692156725b97bac7cdde88f7b774b84.1432037840.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
tests/qemu-iotests/093 | 89 +++++++++++++++++++++++++++++++++++---------------
1 file changed, 63 insertions(+), 26 deletions(-)
diff --git a/tests/qemu-iotests/093 b/tests/qemu-iotests/093
index b9096a5..c0e9e2b 100755
--- a/tests/qemu-iotests/093
+++ b/tests/qemu-iotests/093
@@ -3,6 +3,7 @@
# Tests for IO throttling
#
# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2015 Igalia, S.L.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,6 +23,7 @@ import iotests
class ThrottleTestCase(iotests.QMPTestCase):
test_img = "null-aio://"
+ max_drives = 3
def blockstats(self, device):
result = self.vm.qmp("query-blockstats")
@@ -32,26 +34,31 @@ class ThrottleTestCase(iotests.QMPTestCase):
raise Exception("Device not found for blockstats: %s" % device)
def setUp(self):
- self.vm = iotests.VM().add_drive(self.test_img)
+ self.vm = iotests.VM()
+ for i in range(0, self.max_drives):
+ self.vm.add_drive(self.test_img)
self.vm.launch()
def tearDown(self):
self.vm.shutdown()
- def do_test_throttle(self, seconds, params):
+ def do_test_throttle(self, ndrives, seconds, params):
def check_limit(limit, num):
# IO throttling algorithm is discrete, allow 10% error so the test
# is more robust
return limit == 0 or \
- (num < seconds * limit * 1.1
- and num > seconds * limit * 0.9)
+ (num < seconds * limit * 1.1 / ndrives
+ and num > seconds * limit * 0.9 / ndrives)
nsec_per_sec = 1000000000
- params['device'] = 'drive0'
+ params['group'] = 'test'
- result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
- self.assert_qmp(result, 'return', {})
+ # Set the I/O throttling parameters to all drives
+ for i in range(0, ndrives):
+ params['device'] = 'drive%d' % i
+ result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
+ self.assert_qmp(result, 'return', {})
# Set vm clock to a known value
ns = seconds * nsec_per_sec
@@ -66,32 +73,60 @@ class ThrottleTestCase(iotests.QMPTestCase):
params['iops'] / 2,
params['iops_rd'])
rd_nr *= seconds * 2
+ rd_nr /= ndrives
wr_nr = max(params['bps'] / rq_size / 2,
params['bps_wr'] / rq_size,
params['iops'] / 2,
params['iops_wr'])
wr_nr *= seconds * 2
+ wr_nr /= ndrives
+
+ # Send I/O requests to all drives
for i in range(rd_nr):
- self.vm.hmp_qemu_io("drive0", "aio_read %d %d" % (i * rq_size, rq_size))
+ for drive in range(0, ndrives):
+ self.vm.hmp_qemu_io("drive%d" % drive, "aio_read %d %d" %
+ (i * rq_size, rq_size))
+
for i in range(wr_nr):
- self.vm.hmp_qemu_io("drive0", "aio_write %d %d" % (i * rq_size, rq_size))
+ for drive in range(0, ndrives):
+ self.vm.hmp_qemu_io("drive%d" % drive, "aio_write %d %d" %
+ (i * rq_size, rq_size))
- start_rd_bytes, start_rd_iops, start_wr_bytes, start_wr_iops = self.blockstats('drive0')
+ # We'll store the I/O stats for each drive in these arrays
+ start_rd_bytes = [0] * ndrives
+ start_rd_iops = [0] * ndrives
+ start_wr_bytes = [0] * ndrives
+ start_wr_iops = [0] * ndrives
+ end_rd_bytes = [0] * ndrives
+ end_rd_iops = [0] * ndrives
+ end_wr_bytes = [0] * ndrives
+ end_wr_iops = [0] * ndrives
+
+ # Read the stats before advancing the clock
+ for i in range(0, ndrives):
+ start_rd_bytes[i], start_rd_iops[i], start_wr_bytes[i], \
+ start_wr_iops[i] = self.blockstats('drive%d' % i)
self.vm.qtest("clock_step %d" % ns)
- end_rd_bytes, end_rd_iops, end_wr_bytes, end_wr_iops = self.blockstats('drive0')
- rd_bytes = end_rd_bytes - start_rd_bytes
- rd_iops = end_rd_iops - start_rd_iops
- wr_bytes = end_wr_bytes - start_wr_bytes
- wr_iops = end_wr_iops - start_wr_iops
+ # Read the stats after advancing the clock
+ for i in range(0, ndrives):
+ end_rd_bytes[i], end_rd_iops[i], end_wr_bytes[i], \
+ end_wr_iops[i] = self.blockstats('drive%d' % i)
- self.assertTrue(check_limit(params['bps'], rd_bytes + wr_bytes))
- self.assertTrue(check_limit(params['bps_rd'], rd_bytes))
- self.assertTrue(check_limit(params['bps_wr'], wr_bytes))
- self.assertTrue(check_limit(params['iops'], rd_iops + wr_iops))
- self.assertTrue(check_limit(params['iops_rd'], rd_iops))
- self.assertTrue(check_limit(params['iops_wr'], wr_iops))
+ # Check that the I/O is within the limits and evenly distributed
+ for i in range(0, ndrives):
+ rd_bytes = end_rd_bytes[i] - start_rd_bytes[i]
+ rd_iops = end_rd_iops[i] - start_rd_iops[i]
+ wr_bytes = end_wr_bytes[i] - start_wr_bytes[i]
+ wr_iops = end_wr_iops[i] - start_wr_iops[i]
+
+ self.assertTrue(check_limit(params['bps'], rd_bytes + wr_bytes))
+ self.assertTrue(check_limit(params['bps_rd'], rd_bytes))
+ self.assertTrue(check_limit(params['bps_wr'], wr_bytes))
+ self.assertTrue(check_limit(params['iops'], rd_iops + wr_iops))
+ self.assertTrue(check_limit(params['iops_rd'], rd_iops))
+ self.assertTrue(check_limit(params['iops_wr'], wr_iops))
def test_all(self):
params = {"bps": 4096,
@@ -101,11 +136,13 @@ class ThrottleTestCase(iotests.QMPTestCase):
"iops_rd": 10,
"iops_wr": 10,
}
- # Pick each out of all possible params and test
- for tk in params:
- limits = dict([(k, 0) for k in params])
- limits[tk] = params[tk]
- self.do_test_throttle(5, limits)
+ # Repeat the test with different numbers of drives
+ for ndrives in range(1, self.max_drives + 1):
+ # Pick each out of all possible params and test
+ for tk in params:
+ limits = dict([(k, 0) for k in params])
+ limits[tk] = params[tk] * ndrives
+ self.do_test_throttle(ndrives, 5, limits)
class ThrottleTestCoroutine(ThrottleTestCase):
test_img = "null-co://"
--
2.4.2
* Re: [Qemu-devel] [PULL 00/17] Block patches
2015-06-05 11:57 [Qemu-devel] [PULL 00/17] Block patches Stefan Hajnoczi
` (16 preceding siblings ...)
2015-06-05 11:57 ` [Qemu-devel] [PULL 17/17] qemu-iotests: expand test 093 to support group throttling Stefan Hajnoczi
@ 2015-06-05 13:53 ` Peter Maydell
17 siblings, 0 replies; 20+ messages in thread
From: Peter Maydell @ 2015-06-05 13:53 UTC (permalink / raw)
To: Stefan Hajnoczi; +Cc: Kevin Wolf, QEMU Developers
On 5 June 2015 at 12:57, Stefan Hajnoczi <stefanha@redhat.com> wrote:
> The following changes since commit 3b730f570c5872ceea2137848f1d4554d4847441:
>
> Merge remote-tracking branch 'remotes/agraf/tags/signed-ppc-for-upstream' into staging (2015-06-04 14:04:14 +0100)
>
> are available in the git repository at:
>
> git://github.com/stefanha/qemu.git tags/block-pull-request
>
> for you to fetch changes up to 1ad69e1128519a970a9fdf0203a67ab4bc18eb68:
>
> qemu-iotests: expand test 093 to support group throttling (2015-06-05 11:03:07 +0100)
>
> ----------------------------------------------------------------
I'm afraid this doesn't pass 'make check' on my OSX box:
MALLOC_PERTURB_=${MALLOC_PERTURB_:-$((RANDOM % 255 + 1))} gtester -k --verbose -m=quick tests/test-throttle
TEST: tests/test-throttle... (pid=56152)
/throttle/leak_bucket: OK
/throttle/compute_wait:
qemu: qemu_mutex_lock: Invalid argument
OK
/throttle/init: OK
/throttle/destroy: OK
/throttle/have_timer: OK
/throttle/detach_attach: OK
/throttle/config/enabled: OK
/throttle/config/conflicting: OK
/throttle/config/is_valid: OK
/throttle/config_functions: OK
/throttle/accounting: OK
/throttle/groups: FAIL
GTester: last random seed: R02S1a38f05a43ead1a639325fc354866dd4
(pid=56154)
FAIL: tests/test-throttle
make: *** [check-tests/test-throttle] Error 1
Backtrace:
#0 0x00007fff8d25c286 in __pthread_kill ()
#1 0x00007fff9495d42f in pthread_kill ()
#2 0x00007fff9635fb53 in abort ()
#3 0x00000001000c0890 in error_exit (err=22, msg=0x1000f0042 "qemu_mutex_lock") at /Users/pm215/src/qemu/util/qemu-thread-posix.c:48
#4 0x00000001000c0904 in qemu_mutex_lock (mutex=0x1000fa560) at /Users/pm215/src/qemu/util/qemu-thread-posix.c:75
#5 0x00000001000719b3 in throttle_group_incref (name=0x1000e0d20 "bar") at /Users/pm215/src/qemu/block/throttle-groups.c:86
#6 0x0000000100071829 in throttle_group_register_bs (bs=0x10100c400, groupname=0x1000e0d20 "bar") at /Users/pm215/src/qemu/block/throttle-groups.c:399
#7 0x0000000100002c89 in test_groups () at /Users/pm215/src/qemu/tests/test-throttle.c:519
#8 0x00000001005d891d in g_test_run_suite_internal ()
#9 0x00000001005d8ae1 in g_test_run_suite_internal ()
#10 0x00000001005d8198 in g_test_run_suite ()
#11 0x00000001000012da in main (argc=1, argv=0x7fff5fbff8e0) at /Users/pm215/src/qemu/tests/test-throttle.c:595
OSX errno 22 is EINVAL.
As far as I can tell nothing is calling throttle_groups_init() and
so the mutex hasn't been initialized when the test code tries to
lock it.
-- PMM
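For reference, a minimal standalone C sketch of the failure mode described in the report above: locking a pthread mutex that was never initialized. It is illustrative only, not QEMU code and not a fix. POSIX leaves the behaviour undefined; glibc happens to accept an all-zero static mutex as a valid default mutex, while OS X rejects it with EINVAL (22), which qemu_mutex_lock() then turns into the abort seen in the backtrace.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Never passed to pthread_mutex_init(), mirroring the mutex that
 * throttle_group_incref() tries to take before throttle_groups_init()
 * has run. */
static pthread_mutex_t lock;

int main(void)
{
    int err = pthread_mutex_lock(&lock);

    if (err) {
        /* On OS X this reports 22 (EINVAL); QEMU's wrapper aborts here. */
        fprintf(stderr, "pthread_mutex_lock: error %d%s\n",
                err, err == EINVAL ? " (EINVAL)" : "");
        abort();
    }

    pthread_mutex_unlock(&lock);
    return 0;
}

On Linux the same program typically succeeds, since glibc treats the zero-filled mutex as already initialized, which would explain why the test only fails on the OS X box.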