From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
To: qemu-block@nongnu.org
Cc: Fam Zheng <fam@euphon.net>, Kevin Wolf <kwolf@redhat.com>,
Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>,
Emanuele Giuseppe Esposito <eesposit@redhat.com>,
qemu-devel@nongnu.org, Hanna Reitz <hreitz@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>, John Snow <jsnow@redhat.com>
Subject: [RFC PATCH 3/5] block/io.c: introduce bdrv_subtree_drained_{begin/end}_unlocked
Date: Tue, 1 Mar 2022 09:21:11 -0500 [thread overview]
Message-ID: <20220301142113.163174-4-eesposit@redhat.com> (raw)
In-Reply-To: <20220301142113.163174-1-eesposit@redhat.com>
Same as the locked version, but use BDRV_POLL_WHILE_UNLOCKED.
We are going to add drains to all graph modifications, and they are
generally performed without the AioContext lock taken.
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
block/io.c | 48 ++++++++++++++++++++++++++++++++-----------
include/block/block.h | 2 ++
2 files changed, 38 insertions(+), 12 deletions(-)
diff --git a/block/io.c b/block/io.c
index 4e4cb556c5..d474449d2d 100644
--- a/block/io.c
+++ b/block/io.c
@@ -242,6 +242,7 @@ typedef struct {
bool begin;
bool recursive;
bool poll;
+ bool unlocked;
BdrvChild *parent;
bool ignore_bds_parents;
int *drained_end_counter;
@@ -332,7 +333,7 @@ static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
BdrvChild *parent, bool ignore_bds_parents,
- bool poll);
+ bool poll, bool unlocked);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
BdrvChild *parent, bool ignore_bds_parents,
int *drained_end_counter);
@@ -350,7 +351,8 @@ static void bdrv_co_drain_bh_cb(void *opaque)
if (data->begin) {
assert(!data->drained_end_counter);
bdrv_do_drained_begin(bs, data->recursive, data->parent,
- data->ignore_bds_parents, data->poll);
+ data->ignore_bds_parents, data->poll,
+ data->unlocked);
} else {
assert(!data->poll);
bdrv_do_drained_end(bs, data->recursive, data->parent,
@@ -372,6 +374,7 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
BdrvChild *parent,
bool ignore_bds_parents,
bool poll,
+ bool unlocked,
int *drained_end_counter)
{
BdrvCoDrainData data;
@@ -392,6 +395,7 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
.parent = parent,
.ignore_bds_parents = ignore_bds_parents,
.poll = poll,
+ .unlocked = unlocked,
.drained_end_counter = drained_end_counter,
};
@@ -439,13 +443,13 @@ void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
BdrvChild *parent, bool ignore_bds_parents,
- bool poll)
+ bool poll, bool unlocked)
{
BdrvChild *child, *next;
if (qemu_in_coroutine()) {
bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
- poll, NULL);
+ poll, unlocked, NULL);
return;
}
@@ -456,7 +460,7 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
bs->recursive_quiesce_counter++;
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
- false);
+ false, false);
}
}
@@ -471,18 +475,30 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
*/
if (poll) {
assert(!ignore_bds_parents);
- BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
+ if (unlocked) {
+ BDRV_POLL_WHILE_UNLOCKED(bs,
+ bdrv_drain_poll_top_level(bs, recursive,
+ parent));
+ } else {
+ BDRV_POLL_WHILE(bs,
+ bdrv_drain_poll_top_level(bs, recursive, parent));
+ }
}
}
void bdrv_drained_begin(BlockDriverState *bs)
{
- bdrv_do_drained_begin(bs, false, NULL, false, true);
+ bdrv_do_drained_begin(bs, false, NULL, false, true, false);
}
void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
- bdrv_do_drained_begin(bs, true, NULL, false, true);
+ bdrv_do_drained_begin(bs, true, NULL, false, true, false);
+}
+
+void bdrv_subtree_drained_begin_unlocked(BlockDriverState *bs)
+{
+ bdrv_do_drained_begin(bs, true, NULL, false, true, true);
}
/**
@@ -510,7 +526,7 @@ static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
if (qemu_in_coroutine()) {
bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
- false, drained_end_counter);
+ false, false, drained_end_counter);
return;
}
assert(bs->quiesce_counter > 0);
@@ -554,12 +570,19 @@ void bdrv_subtree_drained_end(BlockDriverState *bs)
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}
+void bdrv_subtree_drained_end_unlocked(BlockDriverState *bs)
+{
+ int drained_end_counter = 0;
+ bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
+ BDRV_POLL_WHILE_UNLOCKED(bs, qatomic_read(&drained_end_counter) > 0);
+}
+
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
int i;
for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
- bdrv_do_drained_begin(child->bs, true, child, false, true);
+ bdrv_do_drained_begin(child->bs, true, child, false, true, false);
}
}
@@ -642,7 +665,8 @@ void bdrv_drain_all_begin(void)
BlockDriverState *bs = NULL;
if (qemu_in_coroutine()) {
- bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
+ bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, false,
+ NULL);
return;
}
@@ -667,7 +691,7 @@ void bdrv_drain_all_begin(void)
AioContext *aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
- bdrv_do_drained_begin(bs, false, NULL, true, false);
+ bdrv_do_drained_begin(bs, false, NULL, true, false, false);
aio_context_release(aio_context);
}
diff --git a/include/block/block.h b/include/block/block.h
index 5a7a850c16..072bde370f 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -780,6 +780,7 @@ void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
* exclusive access to all child nodes as well.
*/
void bdrv_subtree_drained_begin(BlockDriverState *bs);
+void bdrv_subtree_drained_begin_unlocked(BlockDriverState *bs);
/**
* bdrv_drained_end:
@@ -812,6 +813,7 @@ void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter);
* End a quiescent section started by bdrv_subtree_drained_begin().
*/
void bdrv_subtree_drained_end(BlockDriverState *bs);
+void bdrv_subtree_drained_end_unlocked(BlockDriverState *bs);
void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
Error **errp);
--
2.31.1
next prev parent reply other threads:[~2022-03-01 14:24 UTC|newest]
Thread overview: 49+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-03-01 14:21 [RFC PATCH 0/5] Removal of AioContext lock, bs->parents and ->children: proof of concept Emanuele Giuseppe Esposito
2022-03-01 14:21 ` [RFC PATCH 1/5] aio-wait.h: introduce AIO_WAIT_WHILE_UNLOCKED Emanuele Giuseppe Esposito
2022-03-02 16:21 ` Stefan Hajnoczi
2022-03-01 14:21 ` [RFC PATCH 2/5] introduce BDRV_POLL_WHILE_UNLOCKED Emanuele Giuseppe Esposito
2022-03-02 16:22 ` Stefan Hajnoczi
2022-03-09 13:49 ` Eric Blake
2022-03-01 14:21 ` Emanuele Giuseppe Esposito [this message]
2022-03-02 16:25 ` [RFC PATCH 3/5] block/io.c: introduce bdrv_subtree_drained_{begin/end}_unlocked Stefan Hajnoczi
2022-03-01 14:21 ` [RFC PATCH 4/5] child_job_drained_poll: override polling condition only when in home thread Emanuele Giuseppe Esposito
2022-03-02 16:37 ` Stefan Hajnoczi
2022-03-01 14:21 ` [RFC PATCH 5/5] test-bdrv-drain: ensure draining from main loop stops iothreads Emanuele Giuseppe Esposito
2022-03-01 14:26 ` [RFC PATCH 0/5] Removal of AioContext lock, bs->parents and ->children: proof of concept Emanuele Giuseppe Esposito
2022-03-02 9:47 ` Stefan Hajnoczi
2022-03-09 13:26 ` Emanuele Giuseppe Esposito
2022-03-10 15:54 ` Stefan Hajnoczi
2022-03-17 16:23 ` Emanuele Giuseppe Esposito
2022-03-30 10:53 ` Hanna Reitz
2022-03-30 11:55 ` Emanuele Giuseppe Esposito
2022-03-30 14:12 ` Hanna Reitz
2022-03-30 16:02 ` Paolo Bonzini
2022-03-31 9:59 ` Paolo Bonzini
2022-03-31 13:51 ` Emanuele Giuseppe Esposito
2022-03-31 16:40 ` Paolo Bonzini
2022-04-01 8:05 ` Emanuele Giuseppe Esposito
2022-04-01 11:01 ` Paolo Bonzini
2022-04-04 9:25 ` Stefan Hajnoczi
2022-04-04 9:41 ` Paolo Bonzini
2022-04-04 9:51 ` Emanuele Giuseppe Esposito
2022-04-04 10:07 ` Paolo Bonzini
2022-04-05 9:39 ` Stefan Hajnoczi
2022-04-05 10:43 ` Kevin Wolf
2022-04-13 13:43 ` Emanuele Giuseppe Esposito
2022-04-13 14:51 ` Kevin Wolf
2022-04-13 15:14 ` Emanuele Giuseppe Esposito
2022-04-13 15:22 ` Emanuele Giuseppe Esposito
2022-04-13 16:29 ` Kevin Wolf
2022-04-13 20:43 ` Paolo Bonzini
2022-04-13 20:46 ` Paolo Bonzini
2022-03-02 11:07 ` Vladimir Sementsov-Ogievskiy
2022-03-02 16:20 ` Stefan Hajnoczi
2022-03-09 13:26 ` Emanuele Giuseppe Esposito
2022-03-16 21:55 ` Emanuele Giuseppe Esposito
2022-03-21 12:22 ` Vladimir Sementsov-Ogievskiy
2022-03-21 15:24 ` Vladimir Sementsov-Ogievskiy
2022-03-21 15:44 ` Vladimir Sementsov-Ogievskiy
2022-03-30 9:09 ` Emanuele Giuseppe Esposito
2022-03-30 9:52 ` Vladimir Sementsov-Ogievskiy
2022-03-30 9:58 ` Emanuele Giuseppe Esposito
2022-04-05 10:55 ` Kevin Wolf
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220301142113.163174-4-eesposit@redhat.com \
--to=eesposit@redhat.com \
--cc=fam@euphon.net \
--cc=hreitz@redhat.com \
--cc=jsnow@redhat.com \
--cc=kwolf@redhat.com \
--cc=pbonzini@redhat.com \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=stefanha@redhat.com \
--cc=vsementsov@virtuozzo.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).