* [Qemu-devel] [PATCH] gluster: allocate GlusterAIOCBs on the stack
From: Paolo Bonzini @ 2015-10-01 11:04 UTC
To: qemu-devel; +Cc: qemu-block
This is simpler now that the driver has been converted to coroutines.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
block/gluster.c | 86 ++++++++++++++++++++++-----------------------------------
1 file changed, 33 insertions(+), 53 deletions(-)
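A note on why stack allocation is safe here: each request helper parks itself in qemu_coroutine_yield() and only resumes (and returns) after gluster_finish_aiocb() has recorded the result and re-entered the coroutine, so the stack frame holding the GlusterAIOCB stays alive for as long as the completion callback can touch it, and the g_slice_new()/g_slice_free() pair is pure overhead. A rough sketch of the control block, with field names taken from the hunks below (the struct and gluster_finish_aiocb() are defined earlier in block/gluster.c and are untouched by this patch; the types shown are an approximation):

    typedef struct GlusterAIOCB {
        int64_t size;            /* expected transfer size; 0 for flush/discard */
        int ret;                 /* result handed back to the request coroutine */
        Coroutine *coroutine;    /* coroutine parked in qemu_coroutine_yield() */
        AioContext *aio_context; /* AioContext in which the completion is handled */
    } GlusterAIOCB;

gluster_finish_aiocb() stores the outcome in acb->ret and arranges for acb->coroutine to be re-entered in acb->aio_context; only after that re-entry does the request function read acb.ret and return, at which point the stack-allocated acb simply goes out of scope.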
diff --git a/block/gluster.c b/block/gluster.c
index 1eb3a8c..0857c14 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -429,28 +429,23 @@ static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
 {
     int ret;
-    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
+    GlusterAIOCB acb;
     BDRVGlusterState *s = bs->opaque;
     off_t size = nb_sectors * BDRV_SECTOR_SIZE;
     off_t offset = sector_num * BDRV_SECTOR_SIZE;
 
-    acb->size = size;
-    acb->ret = 0;
-    acb->coroutine = qemu_coroutine_self();
-    acb->aio_context = bdrv_get_aio_context(bs);
+    acb.size = size;
+    acb.ret = 0;
+    acb.coroutine = qemu_coroutine_self();
+    acb.aio_context = bdrv_get_aio_context(bs);
 
-    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
+    ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
-        ret = -errno;
-        goto out;
+        return -errno;
     }
 
     qemu_coroutine_yield();
-    ret = acb->ret;
-
-out:
-    g_slice_free(GlusterAIOCB, acb);
-    return ret;
+    return acb.ret;
 }
 
 static inline bool gluster_supports_zerofill(void)
@@ -541,35 +536,30 @@ static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
 {
     int ret;
-    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
+    GlusterAIOCB acb;
     BDRVGlusterState *s = bs->opaque;
     size_t size = nb_sectors * BDRV_SECTOR_SIZE;
     off_t offset = sector_num * BDRV_SECTOR_SIZE;
 
-    acb->size = size;
-    acb->ret = 0;
-    acb->coroutine = qemu_coroutine_self();
-    acb->aio_context = bdrv_get_aio_context(bs);
+    acb.size = size;
+    acb.ret = 0;
+    acb.coroutine = qemu_coroutine_self();
+    acb.aio_context = bdrv_get_aio_context(bs);
 
     if (write) {
         ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
-                                 &gluster_finish_aiocb, acb);
+                                 gluster_finish_aiocb, &acb);
     } else {
         ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
-                                &gluster_finish_aiocb, acb);
+                                gluster_finish_aiocb, &acb);
     }
 
     if (ret < 0) {
-        ret = -errno;
-        goto out;
+        return -errno;
     }
 
     qemu_coroutine_yield();
-    ret = acb->ret;
-
-out:
-    g_slice_free(GlusterAIOCB, acb);
-    return ret;
+    return acb.ret;
 }
 
 static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
@@ -600,26 +590,21 @@ static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
 static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
 {
     int ret;
-    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
+    GlusterAIOCB acb;
     BDRVGlusterState *s = bs->opaque;
 
-    acb->size = 0;
-    acb->ret = 0;
-    acb->coroutine = qemu_coroutine_self();
-    acb->aio_context = bdrv_get_aio_context(bs);
+    acb.size = 0;
+    acb.ret = 0;
+    acb.coroutine = qemu_coroutine_self();
+    acb.aio_context = bdrv_get_aio_context(bs);
 
-    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
+    ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
     if (ret < 0) {
-        ret = -errno;
-        goto out;
+        return -errno;
     }
 
     qemu_coroutine_yield();
-    ret = acb->ret;
-
-out:
-    g_slice_free(GlusterAIOCB, acb);
-    return ret;
+    return acb.ret;
 }
 
 #ifdef CONFIG_GLUSTERFS_DISCARD
@@ -627,28 +612,23 @@ static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors)
 {
     int ret;
-    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
+    GlusterAIOCB acb;
     BDRVGlusterState *s = bs->opaque;
     size_t size = nb_sectors * BDRV_SECTOR_SIZE;
     off_t offset = sector_num * BDRV_SECTOR_SIZE;
 
-    acb->size = 0;
-    acb->ret = 0;
-    acb->coroutine = qemu_coroutine_self();
-    acb->aio_context = bdrv_get_aio_context(bs);
+    acb.size = 0;
+    acb.ret = 0;
+    acb.coroutine = qemu_coroutine_self();
+    acb.aio_context = bdrv_get_aio_context(bs);
 
-    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
+    ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
-        ret = -errno;
-        goto out;
+        return -errno;
     }
 
     qemu_coroutine_yield();
-    ret = acb->ret;
-
-out:
-    g_slice_free(GlusterAIOCB, acb);
-    return ret;
+    return acb.ret;
 }
 #endif
--
2.5.0
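Taken together, each converted function ends up with the same shape; reconstructed from the flush hunk above (comments added, whitespace approximated), qemu_gluster_co_flush_to_disk() now reads roughly:

    static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
    {
        int ret;
        GlusterAIOCB acb;                  /* lives on the coroutine stack */
        BDRVGlusterState *s = bs->opaque;

        acb.size = 0;
        acb.ret = 0;
        acb.coroutine = qemu_coroutine_self();
        acb.aio_context = bdrv_get_aio_context(bs);

        ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
        if (ret < 0) {
            return -errno;                 /* submission failed, callback never runs */
        }

        qemu_coroutine_yield();            /* resumed by gluster_finish_aiocb() */
        return acb.ret;                    /* no out: label, no g_slice_free() */
    }

The write_zeroes, read/write and discard paths lose the same goto/out:/g_slice_free() tail.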
* Re: [Qemu-devel] [Qemu-block] [PATCH] gluster: allocate GlusterAIOCBs on the stack
From: Kevin Wolf @ 2015-10-02 9:23 UTC
To: Paolo Bonzini; +Cc: jcody, qemu-devel, qemu-block
On 01.10.2015 at 13:04, Paolo Bonzini wrote:
> This is simpler now that the driver has been converted to coroutines.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
* Re: [Qemu-devel] [Qemu-block] [PATCH] gluster: allocate GlusterAIOCBs on the stack
From: Stefan Hajnoczi @ 2015-10-06 14:46 UTC
To: Paolo Bonzini; +Cc: Jeff Cody, qemu-devel, qemu-block
On Thu, Oct 01, 2015 at 01:04:38PM +0200, Paolo Bonzini wrote:
> This is simpler now that the driver has been converted to coroutines.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> block/gluster.c | 86 ++++++++++++++++++++++-----------------------------------
> 1 file changed, 33 insertions(+), 53 deletions(-)
CCing Jeff on Gluster patches.
* Re: [Qemu-devel] [PATCH] gluster: allocate GlusterAIOCBs on the stack
From: Jeff Cody @ 2015-10-07 3:00 UTC
To: Paolo Bonzini; +Cc: qemu-devel, qemu-block
On Thu, Oct 01, 2015 at 01:04:38PM +0200, Paolo Bonzini wrote:
> This is simpler now that the driver has been converted to coroutines.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> block/gluster.c | 86 ++++++++++++++++++++++-----------------------------------
> 1 file changed, 33 insertions(+), 53 deletions(-)
>
Thanks,
Applied to my block branch:
git git://github.com/codyprime/qemu-kvm-jtc.git block
-Jeff