From: Kevin Wolf <kwolf@redhat.com>
To: qemu-block@nongnu.org
Cc: kwolf@redhat.com, qemu-devel@nongnu.org, s.reiter@proxmox.com,
	armbru@redhat.com, mreitz@redhat.com, stefanha@redhat.com,
	t.lamprecht@proxmox.com
Subject: [RFC PATCH 1/3] block: Factor out bdrv_run_co()
Date: Tue, 12 May 2020 16:43:16 +0200
Message-ID: <20200512144318.181049-2-kwolf@redhat.com>
In-Reply-To: <20200512144318.181049-1-kwolf@redhat.com>

We have a few bdrv_*() functions that can either spawn a new coroutine
and wait for it with BDRV_POLL_WHILE() or use a fast path if they are
already running in a coroutine. All of them duplicate basically the
same code.

Factor the common code into a new function bdrv_run_co().
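
For illustration, here is the duplicated pattern and its replacement,
sketched from the bdrv_flush() conversion in this patch (the other
callers differ only in their parameter struct and coroutine entry
function):

    /* Before: every synchronous wrapper open-coded the same logic */
    int bdrv_flush(BlockDriverState *bs)
    {
        FlushCo flush_co = {
            .bs  = bs,
            .ret = NOT_DONE,
        };

        if (qemu_in_coroutine()) {
            /* Fast-path if already in coroutine context */
            bdrv_flush_co_entry(&flush_co);
        } else {
            Coroutine *co = qemu_coroutine_create(bdrv_flush_co_entry,
                                                  &flush_co);
            bdrv_coroutine_enter(bs, co);
            BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
        }

        return flush_co.ret;
    }

    /* After: the wrapper only fills in its parameter struct.
     * bdrv_run_co() handles the fast path, coroutine creation and
     * polling; the entry function must store a result other than
     * NOT_DONE in *ret before it terminates. */
    int bdrv_flush(BlockDriverState *bs)
    {
        FlushCo flush_co = {
            .bs = bs,
        };

        return bdrv_run_co(bs, bdrv_flush_co_entry, &flush_co,
                           &flush_co.ret);
    }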

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/io.c | 104 +++++++++++++++--------------------------------------
 1 file changed, 28 insertions(+), 76 deletions(-)

diff --git a/block/io.c b/block/io.c
index 7d30e61edc..c1badaadc9 100644
--- a/block/io.c
+++ b/block/io.c
@@ -891,6 +891,22 @@ static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
     return 0;
 }
 
+static int bdrv_run_co(BlockDriverState *bs, CoroutineEntry *entry,
+                       void *opaque, int *ret)
+{
+    if (qemu_in_coroutine()) {
+        /* Fast-path if already in coroutine context */
+        entry(opaque);
+    } else {
+        Coroutine *co = qemu_coroutine_create(entry, opaque);
+        *ret = NOT_DONE;
+        bdrv_coroutine_enter(bs, co);
+        BDRV_POLL_WHILE(bs, *ret == NOT_DONE);
+    }
+
+    return *ret;
+}
+
 typedef struct RwCo {
     BdrvChild *child;
     int64_t offset;
@@ -923,25 +939,15 @@ static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                         QEMUIOVector *qiov, bool is_write,
                         BdrvRequestFlags flags)
 {
-    Coroutine *co;
     RwCo rwco = {
         .child = child,
         .offset = offset,
         .qiov = qiov,
         .is_write = is_write,
-        .ret = NOT_DONE,
         .flags = flags,
     };
 
-    if (qemu_in_coroutine()) {
-        /* Fast-path if already in coroutine context */
-        bdrv_rw_co_entry(&rwco);
-    } else {
-        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
-        bdrv_coroutine_enter(child->bs, co);
-        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
-    }
-    return rwco.ret;
+    return bdrv_run_co(child->bs, bdrv_rw_co_entry, &rwco, &rwco.ret);
 }
 
 int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
@@ -2230,7 +2236,6 @@ typedef struct BdrvCoBlockStatusData {
     int64_t *map;
     BlockDriverState **file;
     int ret;
-    bool done;
 } BdrvCoBlockStatusData;
 
 int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
@@ -2492,7 +2497,6 @@ static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
                                            data->want_zero,
                                            data->offset, data->bytes,
                                            data->pnum, data->map, data->file);
-    data->done = true;
     aio_wait_kick();
 }
 
@@ -2508,7 +2512,6 @@ static int bdrv_common_block_status_above(BlockDriverState *bs,
                                           int64_t *map,
                                           BlockDriverState **file)
 {
-    Coroutine *co;
     BdrvCoBlockStatusData data = {
         .bs = bs,
         .base = base,
@@ -2518,18 +2521,9 @@ static int bdrv_common_block_status_above(BlockDriverState *bs,
         .pnum = pnum,
         .map = map,
         .file = file,
-        .done = false,
     };
 
-    if (qemu_in_coroutine()) {
-        /* Fast-path if already in coroutine context */
-        bdrv_block_status_above_co_entry(&data);
-    } else {
-        co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
-        bdrv_coroutine_enter(bs, co);
-        BDRV_POLL_WHILE(bs, !data.done);
-    }
-    return data.ret;
+    return bdrv_run_co(bs, bdrv_block_status_above_co_entry, &data, &data.ret);
 }
 
 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
@@ -2669,22 +2663,13 @@ static inline int
 bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                 bool is_read)
 {
-    if (qemu_in_coroutine()) {
-        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
-    } else {
-        BdrvVmstateCo data = {
-            .bs         = bs,
-            .qiov       = qiov,
-            .pos        = pos,
-            .is_read    = is_read,
-            .ret        = -EINPROGRESS,
-        };
-        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
-
-        bdrv_coroutine_enter(bs, co);
-        BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
-        return data.ret;
-    }
+    BdrvVmstateCo data = {
+        .bs         = bs,
+        .qiov       = qiov,
+        .pos        = pos,
+        .is_read    = is_read,
+    };
+    return bdrv_run_co(bs, bdrv_co_rw_vmstate_entry, &data, &data.ret);
 }
 
 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
@@ -2890,22 +2875,11 @@ early_exit:
 
 int bdrv_flush(BlockDriverState *bs)
 {
-    Coroutine *co;
     FlushCo flush_co = {
         .bs = bs,
-        .ret = NOT_DONE,
     };
 
-    if (qemu_in_coroutine()) {
-        /* Fast-path if already in coroutine context */
-        bdrv_flush_co_entry(&flush_co);
-    } else {
-        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
-        bdrv_coroutine_enter(bs, co);
-        BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
-    }
-
-    return flush_co.ret;
+    return bdrv_run_co(bs, bdrv_flush_co_entry, &flush_co, &flush_co.ret);
 }
 
 typedef struct DiscardCo {
@@ -3038,24 +3012,13 @@ out:
 
 int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes)
 {
-    Coroutine *co;
     DiscardCo rwco = {
         .child = child,
         .offset = offset,
         .bytes = bytes,
-        .ret = NOT_DONE,
     };
 
-    if (qemu_in_coroutine()) {
-        /* Fast-path if already in coroutine context */
-        bdrv_pdiscard_co_entry(&rwco);
-    } else {
-        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
-        bdrv_coroutine_enter(child->bs, co);
-        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
-    }
-
-    return rwco.ret;
+    return bdrv_run_co(child->bs, bdrv_pdiscard_co_entry, &rwco, &rwco.ret);
 }
 
 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
@@ -3477,7 +3440,6 @@ static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
 int bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
                   PreallocMode prealloc, BdrvRequestFlags flags, Error **errp)
 {
-    Coroutine *co;
     TruncateCo tco = {
         .child      = child,
         .offset     = offset,
@@ -3485,17 +3447,7 @@ int bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
         .prealloc   = prealloc,
         .flags      = flags,
         .errp       = errp,
-        .ret        = NOT_DONE,
     };
 
-    if (qemu_in_coroutine()) {
-        /* Fast-path if already in coroutine context */
-        bdrv_truncate_co_entry(&tco);
-    } else {
-        co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
-        bdrv_coroutine_enter(child->bs, co);
-        BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE);
-    }
-
-    return tco.ret;
+    return bdrv_run_co(child->bs, bdrv_truncate_co_entry, &tco, &tco.ret);
 }
-- 
2.25.3



Thread overview: 26+ messages
2020-05-12 14:43 [RFC PATCH 0/3] block: Synchronous bdrv_*() from coroutine in different AioContext Kevin Wolf
2020-05-12 14:43 ` Kevin Wolf [this message]
2020-05-12 15:37   ` [RFC PATCH 1/3] block: Factor out bdrv_run_co() Eric Blake
2020-05-20  9:09     ` Philippe Mathieu-Daudé
2020-05-20 11:14       ` Vladimir Sementsov-Ogievskiy
2020-05-12 14:43 ` [RFC PATCH 2/3] block: Allow bdrv_run_co() from different AioContext Kevin Wolf
2020-05-12 16:02   ` Thomas Lamprecht
2020-05-12 19:29     ` Kevin Wolf
2020-05-25 14:18   ` Stefan Reiter
2020-05-25 16:41     ` Kevin Wolf
2020-05-26 16:42       ` Kevin Wolf
2020-05-27  8:56         ` Stefan Reiter
2020-05-12 14:43 ` [RFC PATCH 3/3] block: Assert we're running in the right thread Kevin Wolf
2020-05-14 13:52   ` Stefan Reiter
2020-05-14 14:30     ` Kevin Wolf
2020-05-20  9:12       ` Philippe Mathieu-Daudé
2020-05-14 13:21 ` [RFC PATCH 0/3] block: Synchronous bdrv_*() from coroutine in different AioContext Thomas Lamprecht
2020-05-14 14:26   ` Kevin Wolf
2020-05-19 12:32     ` Vladimir Sementsov-Ogievskiy
2020-05-19 13:54       ` Denis Plotnikov
2020-05-19 14:18         ` Kevin Wolf
2020-05-19 15:05           ` Denis Plotnikov
2020-05-19 15:29             ` Kevin Wolf
2020-05-19 15:48               ` Vladimir Sementsov-Ogievskiy
2020-05-19 16:06                 ` Eric Blake
2020-05-20  7:23               ` Denis Plotnikov
