From: Fiona Ebner <f.ebner@proxmox.com>
To: qemu-block@nongnu.org
Cc: qemu-devel@nongnu.org, kwolf@redhat.com, den@virtuozzo.com,
	andrey.drobyshev@virtuozzo.com, hreitz@redhat.com,
	stefanha@redhat.com, eblake@redhat.com, jsnow@redhat.com,
	vsementsov@yandex-team.ru, xiechanglong.d@gmail.com,
	wencongyang2@huawei.com, berto@igalia.com, fam@euphon.net,
	ari@tuxera.com
Subject: [PATCH v4 15/48] block: move drain outside of bdrv_root_unref_child()
Date: Fri, 30 May 2025 17:10:52 +0200	[thread overview]
Message-ID: <20250530151125.955508-16-f.ebner@proxmox.com> (raw)
In-Reply-To: <20250530151125.955508-1-f.ebner@proxmox.com>

This is part of resolving the deadlock mentioned in commit "block:
move draining out of bdrv_change_aio_context() and mark GRAPH_RDLOCK".

bdrv_root_unref_child() is called by:
1. blk_remove_bs(), where a drained section is introduced.
2. bdrv_unref_child(), which runs under the graph lock, so the drain
   will be moved further up to its callers.
3. block_job_remove_all_bdrv(), where a drained section is introduced.

A drained section is introduced for all callers of bdrv_unref_child()
and its generated coroutine variant bdrv_co_unref_child(); these
callers are not explicitly listed here. The caller quorum_del_child()
holds the graph lock, so it is not actually allowed to drain. This
will be addressed in the next commit.

Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
---

Changes in v4:
* Document requirement that all nodes need to be drained for
  bdrv_root_unref_child() and bdrv_unref_child().
* Also cover bdrv_co_unref_child().

 block.c                      | 18 ++++++++++++++----
 block/blklogwrites.c         |  4 ++++
 block/blkverify.c            |  2 ++
 block/block-backend.c        |  2 ++
 block/qcow2.c                |  4 ++++
 block/quorum.c               |  6 ++++++
 block/replication.c          |  2 ++
 block/snapshot.c             |  2 ++
 block/vmdk.c                 | 10 ++++++++++
 blockjob.c                   |  2 ++
 tests/unit/test-bdrv-drain.c |  4 ++++
 11 files changed, 52 insertions(+), 4 deletions(-)

diff --git a/block.c b/block.c
index f6c2f7e208..15a8ccb822 100644
--- a/block.c
+++ b/block.c
@@ -1721,12 +1721,14 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name,
 open_failed:
     bs->drv = NULL;
 
+    bdrv_drain_all_begin();
     bdrv_graph_wrlock();
     if (bs->file != NULL) {
         bdrv_unref_child(bs, bs->file);
         assert(!bs->file);
     }
     bdrv_graph_wrunlock();
+    bdrv_drain_all_end();
 
     g_free(bs->opaque);
     bs->opaque = NULL;
@@ -3305,7 +3307,11 @@ out:
     return ret < 0 ? NULL : child;
 }
 
-/* Callers must ensure that child->frozen is false. */
+/*
+ * Callers must ensure that child->frozen is false.
+ *
+ * All block nodes must be drained.
+ */
 void bdrv_root_unref_child(BdrvChild *child)
 {
     BlockDriverState *child_bs = child->bs;
@@ -3326,10 +3332,8 @@ void bdrv_root_unref_child(BdrvChild *child)
          * When the parent requiring a non-default AioContext is removed, the
          * node moves back to the main AioContext
          */
-        bdrv_drain_all_begin();
         bdrv_try_change_aio_context_locked(child_bs, qemu_get_aio_context(),
                                            NULL, NULL);
-        bdrv_drain_all_end();
     }
 
     bdrv_schedule_unref(child_bs);
@@ -3402,7 +3406,11 @@ bdrv_unset_inherits_from(BlockDriverState *root, BdrvChild *child,
     }
 }
 
-/* Callers must ensure that child->frozen is false. */
+/*
+ * Callers must ensure that child->frozen is false.
+ *
+ * All block nodes must be drained.
+ */
 void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child)
 {
     GLOBAL_STATE_CODE();
@@ -5172,6 +5180,7 @@ static void bdrv_close(BlockDriverState *bs)
         bs->drv = NULL;
     }
 
+    bdrv_drain_all_begin();
     bdrv_graph_wrlock();
     QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
         bdrv_unref_child(bs, child);
@@ -5180,6 +5189,7 @@ static void bdrv_close(BlockDriverState *bs)
     assert(!bs->backing);
     assert(!bs->file);
     bdrv_graph_wrunlock();
+    bdrv_drain_all_end();
 
     g_free(bs->opaque);
     bs->opaque = NULL;
diff --git a/block/blklogwrites.c b/block/blklogwrites.c
index b0f78c4bc7..70ac76f401 100644
--- a/block/blklogwrites.c
+++ b/block/blklogwrites.c
@@ -281,9 +281,11 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags,
     ret = 0;
 fail_log:
     if (ret < 0) {
+        bdrv_drain_all_begin();
         bdrv_graph_wrlock();
         bdrv_unref_child(bs, s->log_file);
         bdrv_graph_wrunlock();
+        bdrv_drain_all_end();
         s->log_file = NULL;
         qemu_mutex_destroy(&s->mutex);
     }
@@ -296,10 +298,12 @@ static void blk_log_writes_close(BlockDriverState *bs)
 {
     BDRVBlkLogWritesState *s = bs->opaque;
 
+    bdrv_drain_all_begin();
     bdrv_graph_wrlock();
     bdrv_unref_child(bs, s->log_file);
     s->log_file = NULL;
     bdrv_graph_wrunlock();
+    bdrv_drain_all_end();
     qemu_mutex_destroy(&s->mutex);
 }
 
diff --git a/block/blkverify.c b/block/blkverify.c
index db79a36681..3a71f7498c 100644
--- a/block/blkverify.c
+++ b/block/blkverify.c
@@ -151,10 +151,12 @@ static void blkverify_close(BlockDriverState *bs)
 {
     BDRVBlkverifyState *s = bs->opaque;
 
+    bdrv_drain_all_begin();
     bdrv_graph_wrlock();
     bdrv_unref_child(bs, s->test_file);
     s->test_file = NULL;
     bdrv_graph_wrunlock();
+    bdrv_drain_all_end();
 }
 
 static int64_t coroutine_fn GRAPH_RDLOCK
diff --git a/block/block-backend.c b/block/block-backend.c
index 24cae3cb55..68209bb2f7 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -889,9 +889,11 @@ void blk_remove_bs(BlockBackend *blk)
     root = blk->root;
     blk->root = NULL;
 
+    bdrv_drain_all_begin();
     bdrv_graph_wrlock();
     bdrv_root_unref_child(root);
     bdrv_graph_wrunlock();
+    bdrv_drain_all_end();
 }
 
 /*
diff --git a/block/qcow2.c b/block/qcow2.c
index 66fba89b41..45451a7ee8 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -1895,7 +1895,9 @@ qcow2_do_open(BlockDriverState *bs, QDict *options, int flags,
     g_free(s->image_data_file);
     if (open_data_file && has_data_file(bs)) {
         bdrv_graph_co_rdunlock();
+        bdrv_drain_all_begin();
         bdrv_co_unref_child(bs, s->data_file);
+        bdrv_drain_all_end();
         bdrv_graph_co_rdlock();
         s->data_file = NULL;
     }
@@ -2821,9 +2823,11 @@ qcow2_do_close(BlockDriverState *bs, bool close_data_file)
     if (close_data_file && has_data_file(bs)) {
         GLOBAL_STATE_CODE();
         bdrv_graph_rdunlock_main_loop();
+        bdrv_drain_all_begin();
         bdrv_graph_wrlock();
         bdrv_unref_child(bs, s->data_file);
         bdrv_graph_wrunlock();
+        bdrv_drain_all_end();
         s->data_file = NULL;
         bdrv_graph_rdlock_main_loop();
     }
diff --git a/block/quorum.c b/block/quorum.c
index ed8ce801ee..81407a38ee 100644
--- a/block/quorum.c
+++ b/block/quorum.c
@@ -1037,6 +1037,7 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
 
 close_exit:
     /* cleanup on error */
+    bdrv_drain_all_begin();
     bdrv_graph_wrlock();
     for (i = 0; i < s->num_children; i++) {
         if (!opened[i]) {
@@ -1045,6 +1046,7 @@ close_exit:
         bdrv_unref_child(bs, s->children[i]);
     }
     bdrv_graph_wrunlock();
+    bdrv_drain_all_end();
     g_free(s->children);
     g_free(opened);
 exit:
@@ -1057,11 +1059,13 @@ static void quorum_close(BlockDriverState *bs)
     BDRVQuorumState *s = bs->opaque;
     int i;
 
+    bdrv_drain_all_begin();
     bdrv_graph_wrlock();
     for (i = 0; i < s->num_children; i++) {
         bdrv_unref_child(bs, s->children[i]);
     }
     bdrv_graph_wrunlock();
+    bdrv_drain_all_end();
 
     g_free(s->children);
 }
@@ -1143,7 +1147,9 @@ quorum_del_child(BlockDriverState *bs, BdrvChild *child, Error **errp)
             (s->num_children - i - 1) * sizeof(BdrvChild *));
     s->children = g_renew(BdrvChild *, s->children, --s->num_children);
 
+    bdrv_drain_all_begin();
     bdrv_unref_child(bs, child);
+    bdrv_drain_all_end();
 
     quorum_refresh_flags(bs);
 }
diff --git a/block/replication.c b/block/replication.c
index 54cbd03e00..0879718854 100644
--- a/block/replication.c
+++ b/block/replication.c
@@ -656,12 +656,14 @@ static void replication_done(void *opaque, int ret)
     if (ret == 0) {
         s->stage = BLOCK_REPLICATION_DONE;
 
+        bdrv_drain_all_begin();
         bdrv_graph_wrlock();
         bdrv_unref_child(bs, s->secondary_disk);
         s->secondary_disk = NULL;
         bdrv_unref_child(bs, s->hidden_disk);
         s->hidden_disk = NULL;
         bdrv_graph_wrunlock();
+        bdrv_drain_all_end();
 
         s->error = 0;
     } else {
diff --git a/block/snapshot.c b/block/snapshot.c
index 9f300a78bd..28c9c43621 100644
--- a/block/snapshot.c
+++ b/block/snapshot.c
@@ -291,9 +291,11 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
         }
 
         /* .bdrv_open() will re-attach it */
+        bdrv_drain_all_begin();
         bdrv_graph_wrlock();
         bdrv_unref_child(bs, fallback);
         bdrv_graph_wrunlock();
+        bdrv_drain_all_end();
 
         ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp);
         memset(bs->opaque, 0, drv->instance_size);
diff --git a/block/vmdk.c b/block/vmdk.c
index 9c7ab037e1..89a7250120 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -271,6 +271,7 @@ static void vmdk_free_extents(BlockDriverState *bs)
     BDRVVmdkState *s = bs->opaque;
     VmdkExtent *e;
 
+    bdrv_drain_all_begin();
     bdrv_graph_wrlock();
     for (i = 0; i < s->num_extents; i++) {
         e = &s->extents[i];
@@ -283,6 +284,7 @@ static void vmdk_free_extents(BlockDriverState *bs)
         }
     }
     bdrv_graph_wrunlock();
+    bdrv_drain_all_end();
 
     g_free(s->extents);
 }
@@ -1247,9 +1249,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
                             0, 0, 0, 0, 0, &extent, errp);
             if (ret < 0) {
                 bdrv_graph_rdunlock_main_loop();
+                bdrv_drain_all_begin();
                 bdrv_graph_wrlock();
                 bdrv_unref_child(bs, extent_file);
                 bdrv_graph_wrunlock();
+                bdrv_drain_all_end();
                 bdrv_graph_rdlock_main_loop();
                 goto out;
             }
@@ -1266,9 +1270,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
             g_free(buf);
             if (ret) {
                 bdrv_graph_rdunlock_main_loop();
+                bdrv_drain_all_begin();
                 bdrv_graph_wrlock();
                 bdrv_unref_child(bs, extent_file);
                 bdrv_graph_wrunlock();
+                bdrv_drain_all_end();
                 bdrv_graph_rdlock_main_loop();
                 goto out;
             }
@@ -1277,9 +1283,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
             ret = vmdk_open_se_sparse(bs, extent_file, bs->open_flags, errp);
             if (ret) {
                 bdrv_graph_rdunlock_main_loop();
+                bdrv_drain_all_begin();
                 bdrv_graph_wrlock();
                 bdrv_unref_child(bs, extent_file);
                 bdrv_graph_wrunlock();
+                bdrv_drain_all_end();
                 bdrv_graph_rdlock_main_loop();
                 goto out;
             }
@@ -1287,9 +1295,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
         } else {
             error_setg(errp, "Unsupported extent type '%s'", type);
             bdrv_graph_rdunlock_main_loop();
+            bdrv_drain_all_begin();
             bdrv_graph_wrlock();
             bdrv_unref_child(bs, extent_file);
             bdrv_graph_wrunlock();
+            bdrv_drain_all_end();
             bdrv_graph_rdlock_main_loop();
             ret = -ENOTSUP;
             goto out;
diff --git a/blockjob.c b/blockjob.c
index 44991e3ff7..e68181a35b 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -198,6 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job)
      * one to make sure that such a concurrent access does not attempt
      * to process an already freed BdrvChild.
      */
+    bdrv_drain_all_begin();
     bdrv_graph_wrlock();
     while (job->nodes) {
         GSList *l = job->nodes;
@@ -211,6 +212,7 @@ void block_job_remove_all_bdrv(BlockJob *job)
         g_slist_free_1(l);
     }
     bdrv_graph_wrunlock();
+    bdrv_drain_all_end();
 }
 
 bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index ac76525e5a..59c2793725 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -955,11 +955,13 @@ static void bdrv_test_top_close(BlockDriverState *bs)
 {
     BdrvChild *c, *next_c;
 
+    bdrv_drain_all_begin();
     bdrv_graph_wrlock();
     QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
         bdrv_unref_child(bs, c);
     }
     bdrv_graph_wrunlock();
+    bdrv_drain_all_end();
 }
 
 static int coroutine_fn GRAPH_RDLOCK
@@ -1016,7 +1018,9 @@ static void coroutine_fn test_co_delete_by_drain(void *opaque)
         bdrv_graph_co_rdlock();
         QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
             bdrv_graph_co_rdunlock();
+            bdrv_drain_all_begin();
             bdrv_co_unref_child(bs, c);
+            bdrv_drain_all_end();
             bdrv_graph_co_rdlock();
         }
         bdrv_graph_co_rdunlock();
-- 
2.39.5




Thread overview: 60+ messages
2025-05-30 15:10 [PATCH v4 00/48] block: do not drain while holding the graph lock Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 01/48] block: remove outdated comments about AioContext locking Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 02/48] block: move drain outside of read-locked bdrv_reopen_queue_child() Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 03/48] block/snapshot: move drain outside of read-locked bdrv_snapshot_delete() Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 04/48] block: move drain outside of read-locked bdrv_inactivate_recurse() Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 05/48] block: mark bdrv_parent_change_aio_context() GRAPH_RDLOCK Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 06/48] block: mark change_aio_ctx() callback and instances as GRAPH_RDLOCK(_PTR) Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 07/48] block: mark bdrv_child_change_aio_context() GRAPH_RDLOCK Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 08/48] block: move drain outside of bdrv_change_aio_context() and mark GRAPH_RDLOCK Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 09/48] block: move drain outside of bdrv_try_change_aio_context() Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 10/48] block: move drain outside of bdrv_attach_child_common(_abort)() Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 11/48] block: move drain outside of bdrv_set_backing_hd_drained() Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 12/48] block: move drain outside of bdrv_root_attach_child() Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 13/48] block: move drain outside of bdrv_attach_child() Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 14/48] block: move drain outside of quorum_add_child() Fiona Ebner
2025-05-30 15:10 ` Fiona Ebner [this message]
2025-05-30 15:10 ` [PATCH v4 16/48] block: move drain outside of quorum_del_child() Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 17/48] blockdev: drain while unlocked in internal_snapshot_action() Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 18/48] blockdev: drain while unlocked in external_snapshot_action() Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 19/48] block: mark bdrv_drained_begin() and friends as GRAPH_UNLOCKED Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 20/48] iotests/graph-changes-while-io: remove image file after test Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 21/48] iotests/graph-changes-while-io: add test case with removal of lower snapshot Fiona Ebner
2025-05-30 15:10 ` [PATCH v4 22/48] block/io: remove duplicate GLOBAL_STATE_CODE() in bdrv_do_drained_end() Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 23/48] block: never use atomics to access bs->quiesce_counter Fiona Ebner
2025-06-02 14:45   ` Fiona Ebner
2025-07-01 11:24     ` Kevin Wolf
2025-05-30 15:11 ` [PATCH v4 24/48] block: add bdrv_graph_wrlock_drained() convenience wrapper Fiona Ebner
2025-07-01 11:37   ` Kevin Wolf
2025-05-30 15:11 ` [PATCH v4 25/48] block/mirror: switch to bdrv_set_backing_hd_drained() variant Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 26/48] block/commit: " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 27/48] block: call bdrv_set_backing_hd() while unlocked in bdrv_open_backing_file() Fiona Ebner
2025-07-01 13:13   ` Kevin Wolf
2025-05-30 15:11 ` [PATCH v4 28/48] block: mark bdrv_set_backing_hd() as GRAPH_UNLOCKED Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 29/48] blockdev: avoid locking and draining multiple times in external_snapshot_abort() Fiona Ebner
2025-06-02  8:56   ` Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 30/48] block: drop wrapper for bdrv_set_backing_hd_drained() Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 31/48] block-backend: mark blk_drain_all() as GRAPH_UNLOCKED Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 32/48] block/snapshot: mark bdrv_all_delete_snapshot() " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 33/48] block/stream: mark stream_prepare() " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 34/48] block: mark bdrv_reopen_queue() and bdrv_reopen_multiple() " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 35/48] block: mark bdrv_inactivate() as GRAPH_RDLOCK and move drain to callers Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 36/48] block: mark bdrv_inactivate_all() as GRAPH_UNLOCKED Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 37/48] block: mark blk_remove_bs() " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 38/48] block: mark blk_drain() " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 39/48] block-backend: mark blk_io_limits_disable() " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 40/48] block/commit: mark commit_abort() " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 41/48] block: mark bdrv_new() " Fiona Ebner
2025-07-01 16:55   ` Kevin Wolf
2025-05-30 15:11 ` [PATCH v4 42/48] block: mark bdrv_replace_child_bs() " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 43/48] block: mark bdrv_insert_node() " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 44/48] block: mark bdrv_drop_intermediate() " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 45/48] block: mark bdrv_close_all() " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 46/48] block: mark bdrv_close() " Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 47/48] block: mark bdrv_open_child_common() and its callers GRAPH_UNLOCKED Fiona Ebner
2025-05-30 15:11 ` [PATCH v4 48/48] blockjob: mark block_job_remove_all_bdrv() as GRAPH_UNLOCKED Fiona Ebner
2025-06-03 14:54 ` [PATCH v4 00/48] block: do not drain while holding the graph lock Kevin Wolf
2025-06-04  7:38   ` Fiona Ebner
2025-07-01 17:16 ` Kevin Wolf
2025-07-14 13:43   ` Kevin Wolf
2025-07-15 13:24     ` Fiona Ebner
