From: Fam Zheng <famz@redhat.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: qemu-devel@nongnu.org, qemu-block@nongnu.org, kwolf@redhat.com,
stefanha@redhat.com
Subject: Re: [Qemu-devel] [PATCH 07/18] block: introduce bdrv_poll_while and bdrv_wakeup
Date: Fri, 14 Oct 2016 18:42:12 +0800 [thread overview]
Message-ID: <20161014104212.GE14830@lemon> (raw)
In-Reply-To: <1476380062-18001-8-git-send-email-pbonzini@redhat.com>
On Thu, 10/13 19:34, Paolo Bonzini wrote:
> We want the BDS event loop to run exclusively in the iothread that
> owns the BDS's AioContext. This function and macro provides the
> synchronization between the two event loops.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> block/block-backend.c | 7 +------
> block/io.c | 47 +++++++++++------------------------------------
> block/qed-table.c | 16 ++++------------
> block/qed.c | 4 +++-
> include/block/block.h | 9 +++++++++
> include/block/block_int.h | 1 +
> 6 files changed, 29 insertions(+), 55 deletions(-)
>
> diff --git a/block/block-backend.c b/block/block-backend.c
> index 234df1e..c5c2597 100644
> --- a/block/block-backend.c
> +++ b/block/block-backend.c
> @@ -878,7 +878,6 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
> int64_t bytes, CoroutineEntry co_entry,
> BdrvRequestFlags flags)
> {
> - AioContext *aio_context;
> QEMUIOVector qiov;
> struct iovec iov;
> Coroutine *co;
> @@ -900,11 +899,7 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
>
> co = qemu_coroutine_create(co_entry, &rwco);
> qemu_coroutine_enter(co);
> -
> - aio_context = blk_get_aio_context(blk);
> - while (rwco.ret == NOT_DONE) {
> - aio_poll(aio_context, true);
> - }
> + bdrv_poll_while(blk_bs(blk), rwco.ret == NOT_DONE);
Can we make it "BDRV_POLL_WHILE"? With the lower-case name, a reader's mental
model would be to evaluate "rwco.ret == NOT_DONE" once, before doing the poll;
the upper-case spelling signals a macro that re-evaluates the condition.
>
> return rwco.ret;
> }
> diff --git a/block/io.c b/block/io.c
> index afec968..7d3dcfc 100644
> --- a/block/io.c
> +++ b/block/io.c
> @@ -156,23 +156,12 @@ bool bdrv_requests_pending(BlockDriverState *bs)
> return false;
> }
>
> -static bool bdrv_drain_poll(BlockDriverState *bs)
> -{
> - bool waited = false;
> -
> - while (atomic_read(&bs->in_flight) > 0) {
> - aio_poll(bdrv_get_aio_context(bs), true);
> - waited = true;
> - }
> - return waited;
> -}
> -
> static bool bdrv_drain_recurse(BlockDriverState *bs)
> {
> BdrvChild *child;
> bool waited;
>
> - waited = bdrv_drain_poll(bs);
> + waited = bdrv_poll_while(bs, atomic_read(&bs->in_flight) > 0);
>
> if (bs->drv && bs->drv->bdrv_drain) {
> bs->drv->bdrv_drain(bs);
> @@ -485,9 +474,14 @@ void bdrv_inc_in_flight(BlockDriverState *bs)
> atomic_inc(&bs->in_flight);
> }
>
> +void bdrv_wakeup(BlockDriverState *bs)
> +{
> +}
> +
> void bdrv_dec_in_flight(BlockDriverState *bs)
> {
> atomic_dec(&bs->in_flight);
> + bdrv_wakeup(bs);
> }
>
> static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
> @@ -597,13 +591,9 @@ static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
> /* Fast-path if already in coroutine context */
> bdrv_rw_co_entry(&rwco);
> } else {
> - AioContext *aio_context = bdrv_get_aio_context(child->bs);
> -
> co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
> qemu_coroutine_enter(co);
> - while (rwco.ret == NOT_DONE) {
> - aio_poll(aio_context, true);
> - }
> + bdrv_poll_while(child->bs, rwco.ret == NOT_DONE);
> }
> return rwco.ret;
> }
> @@ -1845,14 +1835,10 @@ int64_t bdrv_get_block_status_above(BlockDriverState *bs,
> /* Fast-path if already in coroutine context */
> bdrv_get_block_status_above_co_entry(&data);
> } else {
> - AioContext *aio_context = bdrv_get_aio_context(bs);
> -
> co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
> &data);
> qemu_coroutine_enter(co);
> - while (!data.done) {
> - aio_poll(aio_context, true);
> - }
> + bdrv_poll_while(bs, !data.done);
> }
> return data.ret;
> }
> @@ -2411,13 +2397,9 @@ int bdrv_flush(BlockDriverState *bs)
> /* Fast-path if already in coroutine context */
> bdrv_flush_co_entry(&flush_co);
> } else {
> - AioContext *aio_context = bdrv_get_aio_context(bs);
> -
> co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
> qemu_coroutine_enter(co);
> - while (flush_co.ret == NOT_DONE) {
> - aio_poll(aio_context, true);
> - }
> + bdrv_poll_while(bs, flush_co.ret == NOT_DONE);
> }
>
> return flush_co.ret;
> @@ -2543,13 +2525,9 @@ int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
> /* Fast-path if already in coroutine context */
> bdrv_pdiscard_co_entry(&rwco);
> } else {
> - AioContext *aio_context = bdrv_get_aio_context(bs);
> -
> co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
> qemu_coroutine_enter(co);
> - while (rwco.ret == NOT_DONE) {
> - aio_poll(aio_context, true);
> - }
> + bdrv_poll_while(bs, rwco.ret == NOT_DONE);
> }
>
> return rwco.ret;
> @@ -2608,11 +2586,8 @@ int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
> bdrv_co_ioctl_entry(&data);
> } else {
> Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry, &data);
> -
> qemu_coroutine_enter(co);
> - while (data.ret == -EINPROGRESS) {
> - aio_poll(bdrv_get_aio_context(bs), true);
> - }
> + bdrv_poll_while(bs, data.ret == -EINPROGRESS);
> }
> return data.ret;
> }
> diff --git a/block/qed-table.c b/block/qed-table.c
> index 1a731df..b7d53ce 100644
> --- a/block/qed-table.c
> +++ b/block/qed-table.c
> @@ -174,9 +174,7 @@ int qed_read_l1_table_sync(BDRVQEDState *s)
>
> qed_read_table(s, s->header.l1_table_offset,
> s->l1_table, qed_sync_cb, &ret);
> - while (ret == -EINPROGRESS) {
> - aio_poll(bdrv_get_aio_context(s->bs), true);
> - }
> + bdrv_poll_while(s->bs, ret == -EINPROGRESS);
>
> return ret;
> }
> @@ -195,9 +193,7 @@ int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
> int ret = -EINPROGRESS;
>
> qed_write_l1_table(s, index, n, qed_sync_cb, &ret);
> - while (ret == -EINPROGRESS) {
> - aio_poll(bdrv_get_aio_context(s->bs), true);
> - }
> + bdrv_poll_while(s->bs, ret == -EINPROGRESS);
>
> return ret;
> }
> @@ -268,9 +264,7 @@ int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset
> int ret = -EINPROGRESS;
>
> qed_read_l2_table(s, request, offset, qed_sync_cb, &ret);
> - while (ret == -EINPROGRESS) {
> - aio_poll(bdrv_get_aio_context(s->bs), true);
> - }
> + bdrv_poll_while(s->bs, ret == -EINPROGRESS);
>
> return ret;
> }
> @@ -290,9 +284,7 @@ int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
> int ret = -EINPROGRESS;
>
> qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret);
> - while (ret == -EINPROGRESS) {
> - aio_poll(bdrv_get_aio_context(s->bs), true);
> - }
> + bdrv_poll_while(s->bs, ret == -EINPROGRESS);
>
> return ret;
> }
> diff --git a/block/qed.c b/block/qed.c
> index 1a7ef0a..dcb5fb9 100644
> --- a/block/qed.c
> +++ b/block/qed.c
> @@ -354,7 +354,9 @@ static void qed_start_need_check_timer(BDRVQEDState *s)
> static void qed_cancel_need_check_timer(BDRVQEDState *s)
> {
> trace_qed_cancel_need_check_timer(s);
> - timer_del(s->need_check_timer);
> + if (s->need_check_timer) {
> + timer_del(s->need_check_timer);
> + }
> }
This belongs in a separate patch, or deserves an explanation in the commit
message.
>
> static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
> diff --git a/include/block/block.h b/include/block/block.h
> index 107c603..876a1e7 100644
> --- a/include/block/block.h
> +++ b/include/block/block.h
> @@ -340,6 +340,15 @@ void bdrv_drain(BlockDriverState *bs);
> void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
> void bdrv_drain_all(void);
>
> +#define bdrv_poll_while(bs, cond) ({ \
> + bool waited_ = false; \
> + BlockDriverState *bs_ = (bs); \
> + while ((cond)) { \
> + aio_poll(bdrv_get_aio_context(bs_), true); \
> + waited_ = true; \
> + } \
> + waited_; })
> +
> int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count);
> int bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset, int count);
> int bdrv_has_zero_init_1(BlockDriverState *bs);
> diff --git a/include/block/block_int.h b/include/block/block_int.h
> index 5a7308b..11f877b 100644
> --- a/include/block/block_int.h
> +++ b/include/block/block_int.h
> @@ -786,6 +786,7 @@ void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in);
>
> void bdrv_inc_in_flight(BlockDriverState *bs);
> void bdrv_dec_in_flight(BlockDriverState *bs);
> +void bdrv_wakeup(BlockDriverState *bs);
>
> void blockdev_close_all_bdrv_states(void);
>
> --
> 2.7.4
>
>
next prev parent reply other threads:[~2016-10-14 10:42 UTC|newest]
Thread overview: 45+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-10-13 17:34 [Qemu-devel] [PATCH 00/18] dataplane: remove RFifoLock (including almost all previously sent patches) Paolo Bonzini
2016-10-13 17:34 ` [Qemu-devel] [PATCH 01/18] replication: interrupt failover if the main device is closed Paolo Bonzini
2016-10-13 17:34 ` [Qemu-devel] [PATCH 02/18] blockjob: introduce .drain callback for jobs Paolo Bonzini
2016-10-16 10:02 ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2016-10-17 7:53 ` [Qemu-devel] " Paolo Bonzini
2016-10-13 17:34 ` [Qemu-devel] [PATCH 03/18] mirror: use bdrv_drained_begin/bdrv_drained_end Paolo Bonzini
2016-10-14 9:43 ` Fam Zheng
2016-10-14 10:00 ` Paolo Bonzini
2016-10-13 17:34 ` [Qemu-devel] [PATCH 04/18] block: add BDS field to count in-flight requests Paolo Bonzini
2016-10-13 17:34 ` [Qemu-devel] [PATCH 05/18] block: change drain to look only at one child at a time Paolo Bonzini
2016-10-14 10:12 ` Fam Zheng
2016-10-13 17:34 ` [Qemu-devel] [PATCH 06/18] qed: Implement .bdrv_drain Paolo Bonzini
2016-10-14 10:33 ` Fam Zheng
2016-10-14 10:40 ` Paolo Bonzini
2016-10-13 17:34 ` [Qemu-devel] [PATCH 07/18] block: introduce bdrv_poll_while and bdrv_wakeup Paolo Bonzini
2016-10-14 10:42 ` Fam Zheng [this message]
2016-10-14 10:43 ` Paolo Bonzini
2016-10-16 10:25 ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2016-10-17 7:54 ` [Qemu-devel] " Paolo Bonzini
2016-10-13 17:34 ` [Qemu-devel] [PATCH 08/18] nfs: move nfs_set_events out of the while loops Paolo Bonzini
2016-10-16 10:37 ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2016-10-13 17:34 ` [Qemu-devel] [PATCH 09/18] nfs: use bdrv_poll_while and bdrv_wakeup Paolo Bonzini
2016-10-16 16:17 ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2016-10-13 17:34 ` [Qemu-devel] [PATCH 10/18] sheepdog: " Paolo Bonzini
2016-10-16 16:21 ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2016-10-13 17:34 ` [Qemu-devel] [PATCH 11/18] aio: introduce qemu_get_current_aio_context Paolo Bonzini
2016-10-16 16:28 ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2016-10-13 17:34 ` [Qemu-devel] [PATCH 12/18] iothread: detach all block devices before stopping them Paolo Bonzini
2016-10-14 14:50 ` Fam Zheng
2016-10-14 14:59 ` Paolo Bonzini
2016-10-13 17:34 ` [Qemu-devel] [PATCH 13/18] replication: pass BlockDriverState to reopen_backing_file Paolo Bonzini
2016-10-16 16:31 ` Stefan Hajnoczi
2016-10-13 17:34 ` [Qemu-devel] [PATCH 14/18] block: prepare bdrv_reopen_multiple to release AioContext Paolo Bonzini
2016-10-16 16:32 ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2016-10-13 17:34 ` [Qemu-devel] [PATCH 15/18] block: only call aio_poll on the current thread's AioContext Paolo Bonzini
2016-10-14 14:55 ` Fam Zheng
2016-10-16 16:40 ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2016-10-17 8:04 ` [Qemu-devel] " Paolo Bonzini
2016-10-18 10:10 ` Stefan Hajnoczi
2016-10-13 17:34 ` [Qemu-devel] [PATCH 16/18] iothread: release AioContext around aio_poll Paolo Bonzini
2016-10-13 17:34 ` [Qemu-devel] [PATCH 17/18] qemu-thread: introduce QemuRecMutex Paolo Bonzini
2016-10-13 17:34 ` [Qemu-devel] [PATCH 18/18] aio: convert from RFifoLock to QemuRecMutex Paolo Bonzini
2016-10-16 16:43 ` [Qemu-devel] [Qemu-block] " Stefan Hajnoczi
2016-10-17 8:58 ` [Qemu-devel] [PATCH 00/18] dataplane: remove RFifoLock (including almost all previously sent patches) Christian Borntraeger
2016-10-17 9:17 ` Paolo Bonzini
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20161014104212.GE14830@lemon \
--to=famz@redhat.com \
--cc=kwolf@redhat.com \
--cc=pbonzini@redhat.com \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=stefanha@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).