From: Zhi Yong Wu <zwu.kernel@gmail.com>
To: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
Cc: Kevin Wolf <kwolf@redhat.com>,
Anthony Liguori <aliguori@us.ibm.com>,
Marcelo Tosatti <mtosatti@redhat.com>,
qemu-devel@nongnu.org, Zhi Yong Wu <wuzhy@cn.ibm.com>,
Avi Kivity <avi@redhat.com>, Christoph Hellwig <hch@lst.de>
Subject: Re: [Qemu-devel] [PATCH 3/6] block: switch bdrv_read()/bdrv_write() to coroutines
Date: Tue, 11 Oct 2011 14:44:58 +0800
Message-ID: <CAEH94LiRGoTHwaeUNv6J86kiAB4+2jU2QBsgxpx2Uvr4CPhW=Q@mail.gmail.com>
In-Reply-To: <1317831427-477-4-git-send-email-stefanha@linux.vnet.ibm.com>
On Thu, Oct 6, 2011 at 12:17 AM, Stefan Hajnoczi
<stefanha@linux.vnet.ibm.com> wrote:
> The bdrv_read()/bdrv_write() functions call .bdrv_read()/.bdrv_write().
> They should go through bdrv_co_do_readv() and bdrv_co_do_writev()
> instead in order to unify request processing code across sync, aio, and
> coroutine interfaces. This is also an important step towards removing
> BlockDriverState .bdrv_read()/.bdrv_write() in the future.
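Just to confirm my understanding of the new call path (my own sketch, not
something taken from the patch): callers of the synchronous API are
unchanged, only the path underneath moves onto coroutines, e.g.

    /* Usage sketch (mine, not part of this patch): the synchronous API
     * stays the same for callers; underneath it now runs
     * bdrv_read() -> bdrv_rw_co() -> bdrv_rw_co_entry() -> bdrv_co_do_readv().
     * The helper name read_first_sector() is made up for illustration. */
    static int read_first_sector(BlockDriverState *bs, uint8_t *buf)
    {
        return bdrv_read(bs, 0, buf, 1);
    }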
>
> Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
> ---
> block.c | 112 +++++++++++++++++++++++++++++++++++----------------------------
> 1 files changed, 62 insertions(+), 50 deletions(-)
>
> diff --git a/block.c b/block.c
> index d15784e..90c29db 100644
> --- a/block.c
> +++ b/block.c
> @@ -44,6 +44,8 @@
> #include <windows.h>
> #endif
>
> +#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
> +
> static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
> static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
> int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
> @@ -74,6 +76,8 @@ static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
> static int coroutine_fn bdrv_co_flush_em(BlockDriverState *bs);
> static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
> int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
> +static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
> + int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
>
> static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
> QTAILQ_HEAD_INITIALIZER(bdrv_states);
> @@ -1038,30 +1042,69 @@ static inline bool bdrv_has_async_flush(BlockDriver *drv)
> return drv->bdrv_aio_flush != bdrv_aio_flush_em;
> }
>
> -/* return < 0 if error. See bdrv_write() for the return codes */
> -int bdrv_read(BlockDriverState *bs, int64_t sector_num,
> - uint8_t *buf, int nb_sectors)
> +typedef struct RwCo {
> + BlockDriverState *bs;
> + int64_t sector_num;
> + int nb_sectors;
> + QEMUIOVector *qiov;
> + bool is_write;
> + int ret;
> +} RwCo;
> +
> +static void coroutine_fn bdrv_rw_co_entry(void *opaque)
> {
> - BlockDriver *drv = bs->drv;
> + RwCo *rwco = opaque;
>
> - if (!drv)
> - return -ENOMEDIUM;
> + if (!rwco->is_write) {
> + rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
> + rwco->nb_sectors, rwco->qiov);
> + } else {
> + rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
> + rwco->nb_sectors, rwco->qiov);
> + }
> +}
>
> - if (bdrv_has_async_rw(drv) && qemu_in_coroutine()) {
> - QEMUIOVector qiov;
> - struct iovec iov = {
> - .iov_base = (void *)buf,
> - .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
> - };
> +/*
> + * Process a synchronous request using coroutines
> + */
> +static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
> + int nb_sectors, bool is_write)
> +{
> + QEMUIOVector qiov;
> + struct iovec iov = {
> + .iov_base = (void *)buf,
> + .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
> + };
> + Coroutine *co;
> + RwCo rwco = {
> + .bs = bs,
> + .sector_num = sector_num,
> + .nb_sectors = nb_sectors,
> + .qiov = &qiov,
> + .is_write = is_write,
> + .ret = NOT_DONE,
> + };
>
> - qemu_iovec_init_external(&qiov, &iov, 1);
> - return bdrv_co_readv(bs, sector_num, nb_sectors, &qiov);
> - }
> + qemu_iovec_init_external(&qiov, &iov, 1);
>
> - if (bdrv_check_request(bs, sector_num, nb_sectors))
> - return -EIO;
> + if (qemu_in_coroutine()) {
> + /* Fast-path if already in coroutine context */
> + bdrv_rw_co_entry(&rwco);
> + } else {
> + co = qemu_coroutine_create(bdrv_rw_co_entry);
> + qemu_coroutine_enter(co, &rwco);
> + while (rwco.ret == NOT_DONE) {
> + qemu_aio_wait();
> + }
> + }
> + return rwco.ret;
> +}
>
> - return drv->bdrv_read(bs, sector_num, buf, nb_sectors);
> +/* return < 0 if error. See bdrv_write() for the return codes */
> +int bdrv_read(BlockDriverState *bs, int64_t sector_num,
> + uint8_t *buf, int nb_sectors)
> +{
> + return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
> }
>
> static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
> @@ -1101,36 +1144,7 @@ static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
> int bdrv_write(BlockDriverState *bs, int64_t sector_num,
> const uint8_t *buf, int nb_sectors)
> {
> - BlockDriver *drv = bs->drv;
> -
> - if (!bs->drv)
> - return -ENOMEDIUM;
> -
> - if (bdrv_has_async_rw(drv) && qemu_in_coroutine()) {
> - QEMUIOVector qiov;
> - struct iovec iov = {
> - .iov_base = (void *)buf,
> - .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
> - };
> -
> - qemu_iovec_init_external(&qiov, &iov, 1);
> - return bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);
> - }
> -
> - if (bs->read_only)
> - return -EACCES;
> - if (bdrv_check_request(bs, sector_num, nb_sectors))
> - return -EIO;
> -
> - if (bs->dirty_bitmap) {
> - set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
> - }
> -
> - if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
> - bs->wr_highest_sector = sector_num + nb_sectors - 1;
> - }
The dirty bitmap and wr_highest_sector updates above are removed by this patch. Is that safe? Where does this bookkeeping happen now for the write path?
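For reference, a sketch of the bookkeeping I mean, written as a small
helper that I would expect the coroutine write path (bdrv_co_do_writev()?)
to call on successful completion. The helper name and the placement are my
assumption, not something in this series:

    /* Sketch only: my guess at where the removed bookkeeping could live;
     * the helper name is made up for illustration. */
    static void bdrv_update_dirty_and_highest(BlockDriverState *bs,
                                              int64_t sector_num,
                                              int nb_sectors)
    {
        if (bs->dirty_bitmap) {
            set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
        }
        if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
            bs->wr_highest_sector = sector_num + nb_sectors - 1;
        }
    }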
> -
> - return drv->bdrv_write(bs, sector_num, buf, nb_sectors);
> + return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
> }
>
> int bdrv_pread(BlockDriverState *bs, int64_t offset,
> @@ -2891,8 +2905,6 @@ static void bdrv_rw_em_cb(void *opaque, int ret)
> *(int *)opaque = ret;
> }
>
> -#define NOT_DONE 0x7fffffff
> -
> static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
> uint8_t *buf, int nb_sectors)
> {
> --
> 1.7.6.3
>
>
>
--
Regards,
Zhi Yong Wu