From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
To: qemu-block@nongnu.org
Cc: Fam Zheng <fam@euphon.net>, Kevin Wolf <kwolf@redhat.com>,
Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>,
Emanuele Giuseppe Esposito <eesposit@redhat.com>,
qemu-devel@nongnu.org, Hanna Reitz <hreitz@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>, John Snow <jsnow@redhat.com>
Subject: [RFC PATCH 5/5] test-bdrv-drain: ensure draining from main loop stops iothreads
Date: Tue, 1 Mar 2022 09:21:13 -0500
Message-ID: <20220301142113.163174-6-eesposit@redhat.com>
In-Reply-To: <20220301142113.163174-1-eesposit@redhat.com>
Add two tests: test_main_and_then_iothread_drain ensures that while the
main thread drains, the iothread cannot drain (and therefore cannot
read the graph). test_main_and_iothread_drain instead lets the main
loop and the iothread drain together, and makes sure that no two
drains happen in parallel.
Note that we are using bdrv_subtree_drained_{begin/end}_unlocked
to avoid taking the AioContext lock as much as possible, since
it will eventually go away.
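For reference, the pattern both tests exercise from the main loop is
roughly the following (a minimal sketch; the graph-modification step
is only a placeholder):

    bdrv_subtree_drained_begin_unlocked(bs);
    /* ... no iothread can drain or read the graph here ... */
    bdrv_subtree_drained_end_unlocked(bs); /* iothreads resume */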
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
tests/unit/test-bdrv-drain.c | 218 +++++++++++++++++++++++++++++++++++
1 file changed, 218 insertions(+)
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 36be84ae55..bf7fdcb568 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -1534,6 +1534,219 @@ static void test_set_aio_context(void)
iothread_join(b);
}
+typedef struct ParallelDrainJob {
+ BlockJob common;
+ BlockBackend *blk;
+ BlockDriverState *bs;
+ IOThread *a;
+ bool should_fail;
+ bool allowed_to_run;
+ bool conclude_test;
+ bool job_started;
+} ParallelDrainJob;
+
+typedef struct BDRVParallelTestState {
+ ParallelDrainJob *job;
+} BDRVParallelTestState;
+
+static void coroutine_fn bdrv_test_par_co_drain(BlockDriverState *bs)
+{
+ BDRVParallelTestState *s = bs->opaque;
+ ParallelDrainJob *job = s->job;
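+ /*
+ * should_fail is set while the main loop is inside its drained
+ * section: a drain from the iothread must never run in that window.
+ */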
+ assert(!qatomic_read(&job->should_fail));
+}
+
+static int parallel_run_test(Job *job, Error **errp)
+{
+ ParallelDrainJob *s = container_of(job, ParallelDrainJob, common.job);
+ s->job_started = true;
+
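+ /* job coroutine in the iothread: keep draining until told to stop */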
+ while (!s->conclude_test) {
+ if (s->allowed_to_run) {
+ bdrv_subtree_drained_begin_unlocked(s->bs);
+ bdrv_subtree_drained_end_unlocked(s->bs);
+ }
+ job_pause_point(&s->common.job);
+ }
+ s->job_started = false;
+
+ return 0;
+}
+
+static BlockDriver bdrv_test_parallel = {
+ .format_name = "test",
+ .instance_size = sizeof(BDRVParallelTestState),
+ .supports_backing = true,
+
+ .bdrv_co_drain_begin = bdrv_test_par_co_drain,
+ .bdrv_co_drain_end = bdrv_test_par_co_drain,
+
+ .bdrv_child_perm = bdrv_default_perms,
+};
+
+static bool parallel_drained_poll(BlockJob *job)
+{
+ /* must return false, otherwise a drain issued from a coroutine deadlocks */
+ return false;
+}
+
+static const BlockJobDriver test_block_job_driver_parallel = {
+ .job_driver = {
+ .instance_size = sizeof(ParallelDrainJob),
+ .run = parallel_run_test,
+ .user_resume = block_job_user_resume,
+ .free = block_job_free,
+ },
+ .drained_poll = parallel_drained_poll,
+};
+
+static ParallelDrainJob *setup_job_test(void)
+{
+ BlockBackend *blk;
+ BlockDriverState *bs;
+ Error *err = NULL;
+ IOThread *a = iothread_new();
+ AioContext *ctx_a = iothread_get_aio_context(a);
+ ParallelDrainJob *s;
+ BDRVParallelTestState *p;
+ int ret;
+
+ blk = blk_new(qemu_get_aio_context(),
+ BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
+ blk_set_allow_aio_context_change(blk, true);
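+ /* allow moving blk to the iothread's AioContext later in setup */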
+
+ bs = bdrv_new_open_driver(&bdrv_test_parallel, "parent", 0,
+ &error_abort);
+ p = bs->opaque;
+
+ ret = blk_insert_bs(blk, bs, &error_abort);
+ assert(ret == 0);
+
+ s = block_job_create("job1", &test_block_job_driver_parallel,
+ NULL, blk_bs(blk), 0, BLK_PERM_ALL, 0, JOB_DEFAULT,
+ NULL, NULL, &err);
+ s->bs = bs;
+ s->a = a;
+ s->blk = blk;
+ p->job = s;
+
+ ret = bdrv_try_set_aio_context(bs, ctx_a, &error_abort);
+ assert(ret == 0);
+ assert(blk_get_aio_context(blk) == ctx_a);
+ assert(s->common.job.aio_context == ctx_a);
+
+ return s;
+}
+
+static void stop_and_tear_down_test(ParallelDrainJob *s)
+{
+ AioContext *ctx = iothread_get_aio_context(s->a);
+
+ /* stop iothread */
+ s->conclude_test = true;
+
+ /* wait until it has stopped */
+ while (s->job_started) {
+ aio_poll(qemu_get_current_aio_context(), false);
+ }
+
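+ /* unref the nodes while holding the iothread's AioContext lock */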
+ aio_context_acquire(ctx);
+ bdrv_unref(s->bs);
+ blk_unref(s->blk);
+ aio_context_release(ctx);
+ iothread_join(s->a);
+}
+
+/**
+ * test_main_and_then_iothread_drain: test that while the main
+ * loop drains, the iothread cannot possibly drain.
+ *
+ * In this test, first make sure that the main loop has drained,
+ * and only then allow the iothread to try to drain.
+ *
+ * Therefore, while the main loop drains, there is no way that
+ * the graph can be read or written by the iothread.
+ */
+static void test_main_and_then_iothread_drain(void)
+{
+ ParallelDrainJob *s = setup_job_test();
+
+ s->allowed_to_run = false;
+ job_start(&s->common.job);
+
+ /*
+ * Wait until the iothread's job starts, even though it just
+ * loops without doing anything (allowed_to_run is false)
+ */
+ while (!s->job_started) {
+ aio_poll(qemu_get_current_aio_context(), false);
+ }
+
+ /*
+ * Drain in the main loop and ensure that no drain
+ * is performed by the iothread.
+ */
+ bdrv_subtree_drained_begin_unlocked(s->bs);
+ /* the iothread must not drain from now on */
+ qatomic_set(&s->should_fail, true);
+ /* let the iothread do drains */
+ s->allowed_to_run = true;
+
+ /* [perform graph modifications here], as the iothread is stopped */
+ sleep(3);
+
+ /* done with modifications, let the iothread drain once it resumes */
+ qatomic_set(&s->should_fail, false);
+
+ /* drained_end should resume the iothread */
+ bdrv_subtree_drained_end_unlocked(s->bs);
+
+ stop_and_tear_down_test(s);
+}
+
+/**
+ * test_main_and_iothread_drain: test that while the main
+ * loop drains, the iothread cannot possibly drain.
+ *
+ * In this test, simply let the iothread and the main loop
+ * concurrently loop and drain together, making sure that the
+ * iothread never reads the graph while the main loop is
+ * draining.
+ *
+ * Therefore, while the main loop drains, there is no way that
+ * the graph can be read or written by the iothread.
+ */
+ */
+static void test_main_and_iothread_drain(void)
+{
+ ParallelDrainJob *s = setup_job_test();
+ int i;
+
+ /* let the iothread do drains from beginning */
+ s->allowed_to_run = true;
+ job_start(&s->common.job);
+
+ /* wait until the iothread starts and begins draining. */
+ while (!s->job_started) {
+ aio_poll(qemu_get_current_aio_context(), false);
+ }
+
+ /* at the same time, the main loop also performs drains */
+ for (i = 0; i < 1000; i++) {
+ bdrv_subtree_drained_begin_unlocked(s->bs);
+ /* the iothread must not drain now, regardless of where it is in its loop */
+ qatomic_set(&s->should_fail, true);
+
+ /* [perform graph modifications here] */
+
+ /* done with modifications, let the iothread drain once it resumes */
+ qatomic_set(&s->should_fail, false);
+
+ /* drained_end should resume the iothread */
+ bdrv_subtree_drained_end_unlocked(s->bs);
+ }
+
+ stop_and_tear_down_test(s);
+}
+
typedef struct TestDropBackingBlockJob {
BlockJob common;
@@ -2185,6 +2398,11 @@ int main(int argc, char **argv)
g_test_add_func("/bdrv-drain/iothread/drain_subtree",
test_iothread_drain_subtree);
+ g_test_add_func("/bdrv-drain/iothread/drain_main_and_then_iothread",
+ test_main_and_then_iothread_drain);
+ g_test_add_func("/bdrv-drain/iothread/drain_parallel",
+ test_main_and_iothread_drain);
+
g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
--
2.31.1