qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Jeff Cody <jcody@redhat.com>
To: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: kwolf@redhat.com, qemu-devel@nongnu.org, stefanha@redhat.com
Subject: Re: [Qemu-devel] [RFC PATCH v0 2/3] gluster: Implement .bdrv_co_write_zeroes for gluster
Date: Wed, 4 Dec 2013 14:16:28 -0500	[thread overview]
Message-ID: <20131204191628.GD3780@localhost.localdomain> (raw)
In-Reply-To: <1385104578-19369-3-git-send-email-bharata@linux.vnet.ibm.com>

On Fri, Nov 22, 2013 at 12:46:17PM +0530, Bharata B Rao wrote:
> Support .bdrv_co_write_zeroes() from gluster driver by using GlusterFS API
> glfs_zerofill() that off-loads the writing of zeroes to GlusterFS server.
> 
> Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
> ---
>  block/gluster.c | 101 ++++++++++++++++++++++++++++++++++++++++----------------
>  configure       |   8 +++++
>  2 files changed, 81 insertions(+), 28 deletions(-)
> 
> diff --git a/block/gluster.c b/block/gluster.c
> index 9f85228..15f5dfb 100644
> --- a/block/gluster.c
> +++ b/block/gluster.c
> @@ -250,6 +250,34 @@ static void qemu_gluster_complete_aio(void *opaque)
>      qemu_aio_release(acb);
>  }
>  
> +/*
> + * AIO callback routine called from GlusterFS thread.
> + */
> +static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
> +{
> +    GlusterAIOCB *acb = (GlusterAIOCB *)arg;
> +
> +    acb->ret = ret;
> +    acb->bh = qemu_bh_new(qemu_gluster_complete_aio, acb);
> +    qemu_bh_schedule(acb->bh);
> +}
> +
> +static void qemu_gluster_aio_cancel(BlockDriverAIOCB *blockacb)
> +{
> +    GlusterAIOCB *acb = (GlusterAIOCB *)blockacb;
> +    bool finished = false;
> +
> +    acb->finished = &finished;
> +    while (!finished) {
> +        qemu_aio_wait();
> +    }
> +}
> +
> +static const AIOCBInfo gluster_aiocb_info = {
> +    .aiocb_size = sizeof(GlusterAIOCB),
> +    .cancel = qemu_gluster_aio_cancel,
> +};
> +
>  /* TODO Convert to fine grained options */
>  static QemuOptsList runtime_opts = {
>      .name = "gluster",
> @@ -322,6 +350,39 @@ out:
>      return ret;
>  }
>  
> +#ifdef CONFIG_GLUSTERFS_ZEROFILL
> +static int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
> +        int64_t sector_num, int nb_sectors)
> +{
> +    int ret;
> +    GlusterAIOCB *acb;
> +    BDRVGlusterState *s = bs->opaque;
> +    off_t size;
> +    off_t offset;
> +
> +    offset = sector_num * BDRV_SECTOR_SIZE;
> +    size = nb_sectors * BDRV_SECTOR_SIZE;
> +
> +    acb = qemu_aio_get(&gluster_aiocb_info, bs, NULL, NULL);
> +    acb->size = size;
> +    acb->ret = 0;
> +    acb->finished = NULL;
> +    acb->coroutine = qemu_coroutine_self();
> +
> +    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
> +    if (ret < 0) {

I believe glfs_zerofill_async() returns -1 on failure and sets errno.
In that case, we should set ret = -errno here, before jumping to the
cleanup label, so the caller gets a proper negative errno code rather
than a bare -1.

> +        goto out;
> +    }
> +
> +    qemu_coroutine_yield();
> +    return acb->ret;
> +
> +out:
> +    qemu_aio_release(acb);
> +    return ret;
> +}
> +#endif
> +
>  static int qemu_gluster_create(const char *filename,
>          QEMUOptionParameter *options, Error **errp)
>  {
> @@ -364,34 +425,6 @@ out:
>      return ret;
>  }
>  
> -static void qemu_gluster_aio_cancel(BlockDriverAIOCB *blockacb)
> -{
> -    GlusterAIOCB *acb = (GlusterAIOCB *)blockacb;
> -    bool finished = false;
> -
> -    acb->finished = &finished;
> -    while (!finished) {
> -        qemu_aio_wait();
> -    }
> -}
> -
> -static const AIOCBInfo gluster_aiocb_info = {
> -    .aiocb_size = sizeof(GlusterAIOCB),
> -    .cancel = qemu_gluster_aio_cancel,
> -};
> -
> -/*
> - * AIO callback routine called from GlusterFS thread.
> - */
> -static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
> -{
> -    GlusterAIOCB *acb = (GlusterAIOCB *)arg;
> -
> -    acb->ret = ret;
> -    acb->bh = qemu_bh_new(qemu_gluster_complete_aio, acb);
> -    qemu_bh_schedule(acb->bh);
> -}
> -
>  static coroutine_fn int qemu_gluster_aio_rw(BlockDriverState *bs,
>          int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
>  {
> @@ -583,6 +616,9 @@ static BlockDriver bdrv_gluster = {
>  #ifdef CONFIG_GLUSTERFS_DISCARD
>      .bdrv_co_discard              = qemu_gluster_co_discard,
>  #endif
> +#ifdef CONFIG_GLUSTERFS_ZEROFILL
> +    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
> +#endif
>      .create_options               = qemu_gluster_create_options,
>  };
>  
> @@ -604,6 +640,9 @@ static BlockDriver bdrv_gluster_tcp = {
>  #ifdef CONFIG_GLUSTERFS_DISCARD
>      .bdrv_co_discard              = qemu_gluster_co_discard,
>  #endif
> +#ifdef CONFIG_GLUSTERFS_ZEROFILL
> +    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
> +#endif
>      .create_options               = qemu_gluster_create_options,
>  };
>  
> @@ -625,6 +664,9 @@ static BlockDriver bdrv_gluster_unix = {
>  #ifdef CONFIG_GLUSTERFS_DISCARD
>      .bdrv_co_discard              = qemu_gluster_co_discard,
>  #endif
> +#ifdef CONFIG_GLUSTERFS_ZEROFILL
> +    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
> +#endif
>      .create_options               = qemu_gluster_create_options,
>  };
>  
> @@ -646,6 +688,9 @@ static BlockDriver bdrv_gluster_rdma = {
>  #ifdef CONFIG_GLUSTERFS_DISCARD
>      .bdrv_co_discard              = qemu_gluster_co_discard,
>  #endif
> +#ifdef CONFIG_GLUSTERFS_ZEROFILL
> +    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
> +#endif
>      .create_options               = qemu_gluster_create_options,
>  };
>  
> diff --git a/configure b/configure
> index 508f6a5..3c267a4 100755
> --- a/configure
> +++ b/configure
> @@ -255,6 +255,7 @@ coroutine_pool=""
>  seccomp=""
>  glusterfs=""
>  glusterfs_discard="no"
> +glusterfs_zerofill="no"
>  virtio_blk_data_plane=""
>  gtk=""
>  gtkabi="2.0"
> @@ -2670,6 +2671,9 @@ if test "$glusterfs" != "no" ; then
>      if $pkg_config --atleast-version=5 glusterfs-api; then
>        glusterfs_discard="yes"
>      fi
> +    if $pkg_config --atleast-version=6 glusterfs-api; then
> +      glusterfs_zerofill="yes"
> +    fi
>    else
>      if test "$glusterfs" = "yes" ; then
>        feature_not_found "GlusterFS backend support"
> @@ -4171,6 +4175,10 @@ if test "$glusterfs_discard" = "yes" ; then
>    echo "CONFIG_GLUSTERFS_DISCARD=y" >> $config_host_mak
>  fi
>  
> +if test "$glusterfs_zerofill" = "yes" ; then
> +  echo "CONFIG_GLUSTERFS_ZEROFILL=y" >> $config_host_mak
> +fi
> +
>  if test "$libssh2" = "yes" ; then
>    echo "CONFIG_LIBSSH2=y" >> $config_host_mak
>  fi
> -- 
> 1.7.11.7
> 
> 

  reply	other threads:[~2013-12-04 19:16 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-11-22  7:16 [Qemu-devel] [RFC PATCH v0 0/3] gluster: conversion to coroutines and supporting write_zeroes Bharata B Rao
2013-11-22  7:16 ` [Qemu-devel] [RFC PATCH v0 1/3] gluster: Convert aio routines into coroutines Bharata B Rao
2013-12-03 14:04   ` Stefan Hajnoczi
2013-12-05 10:42     ` Bharata B Rao
2013-11-22  7:16 ` [Qemu-devel] [RFC PATCH v0 2/3] gluster: Implement .bdrv_co_write_zeroes for gluster Bharata B Rao
2013-12-04 19:16   ` Jeff Cody [this message]
2013-12-05 10:45     ` Bharata B Rao
2013-11-22  7:16 ` [Qemu-devel] [RFC PATCH v0 3/3] gluster: Add support for creating zero-filled image Bharata B Rao
2013-12-04 19:00   ` Jeff Cody

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20131204191628.GD3780@localhost.localdomain \
    --to=jcody@redhat.com \
    --cc=bharata@linux.vnet.ibm.com \
    --cc=kwolf@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=stefanha@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).