* [Qemu-devel] [RFC PATCH 2/3] block: GlusterFS helpers to interface with libglusterfs
From: Bharata B Rao @ 2012-06-11 14:20 UTC (permalink / raw)
To: qemu-devel; +Cc: Amar Tumballi, Vijay Bellur
block: GlusterFS helpers to interface with libglusterfs
From: Bharata B Rao <bharata@linux.vnet.ibm.com>
This patch does two things:
- Adds GlusterFS-specific init routines that enable QEMU to load the
volume file and initialize the necessary translators.
- Implements routines like gluster_open(), gluster_aio_readv(),
gluster_aio_writev() and gluster_close() that are used by QEMU's block
layer to do I/O on VM images exported by a GlusterFS server.
When libglusterfsclient is resurrected, this entire patch becomes
redundant, since the Gluster init routines and other POSIX-style calls
are already present in libglusterfsclient.
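
For illustration, here is a minimal sketch (not part of this patch) of
driving the synchronous helpers; the volfile and image paths below are
hypothetical:

    #include <stdio.h>
    #include <fcntl.h>
    #include <sys/stat.h>
    #include "gluster-helpers.h"

    static int open_image_example(void)
    {
        struct stat st;
        gluster_file_t fd;

        /* hypothetical volfile path; note that patch 3 sleeps after
         * this call to let the client-server handshake finish */
        if (!gluster_init("/etc/glusterfs/qemu.vol")) {
            return -1;
        }

        /* image path within the volume, illustrative only */
        fd = gluster_open("/images/vm01.img", O_RDWR, 0);
        if (!fd) {
            return -1;
        }
        if (!gluster_fstat(fd, &st)) {
            printf("image size: %lld bytes\n", (long long)st.st_size);
        }
        return gluster_close(fd);
    }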
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
---
Makefile.objs | 1
block/gluster-helpers.c | 1018 +++++++++++++++++++++++++++++++++++++++++++++++
block/gluster-helpers.h | 40 ++
3 files changed, 1059 insertions(+), 0 deletions(-)
create mode 100644 block/gluster-helpers.c
create mode 100644 block/gluster-helpers.h
diff --git a/Makefile.objs b/Makefile.objs
index 70c5c79..25190ba 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -59,6 +59,7 @@ block-nested-$(CONFIG_POSIX) += raw-posix.o
block-nested-$(CONFIG_LIBISCSI) += iscsi.o
block-nested-$(CONFIG_CURL) += curl.o
block-nested-$(CONFIG_RBD) += rbd.o
+block-nested-$(CONFIG_GLUSTERFS) += gluster-helpers.o
block-obj-y += $(addprefix block/, $(block-nested-y))
diff --git a/block/gluster-helpers.c b/block/gluster-helpers.c
new file mode 100644
index 0000000..cae3fdf
--- /dev/null
+++ b/block/gluster-helpers.c
@@ -0,0 +1,1018 @@
+/*
+ * Helper routines for GlusterFS backend
+ * (Based on libglusterfsclient)
+ *
+ * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+#include <stdio.h>
+#include "gluster-helpers.h"
+static glusterfs_ctx_t *gctx;
+
+/********** Translator helpers ***********/
+typedef struct {
+ pthread_mutex_t lock;
+ pthread_cond_t reply_cond;
+ call_stub_t *reply_stub;
+ char complete;
+ loc_t *loc;
+ fd_t *fd;
+ gluster_aiocb_t *gaiocb;
+ struct iobref *iobref;
+ struct iobuf *iob;
+} gluster_local_t;
+
+static call_frame_t *get_call_frame_for_req(glusterfs_ctx_t *ctx)
+{
+ call_pool_t *pool = ctx->pool;
+ xlator_t *this = (xlator_t *)ctx->active->top;
+ call_frame_t *frame;
+
+ frame = create_frame(this, pool);
+ if (!frame) {
+ return NULL;
+ }
+
+ frame->root->uid = geteuid();
+ frame->root->gid = getegid();
+ return frame;
+}
+
+#define GLUSTER_FOP(ctx, stub, op, local, args ...) \
+ do { \
+ call_frame_t *frame = get_call_frame_for_req(ctx); \
+ xlator_t *xl = (xlator_t *)ctx->active->top; \
+ frame->local = local; \
+ frame->root->state = gctx; \
+ pthread_cond_init(&local->reply_cond, NULL); \
+ pthread_mutex_init(&local->lock, NULL); \
+ STACK_WIND(frame, gluster_##op##_cbk, xl, xl->fops->op, args); \
+ pthread_mutex_lock(&local->lock); \
+ { \
+ while (!local->complete) { \
+ pthread_cond_wait(&local->reply_cond, &local->lock); \
+ } \
+ } \
+ pthread_mutex_unlock(&local->lock); \
+ stub = local->reply_stub; \
+ FREE(frame->local); \
+ frame->local = NULL; \
+ STACK_DESTROY(frame->root); \
+ } while (0) \
+
+#define GLUSTER_FOP_NO_WAIT(ctx, stub, op, local, args ...) \
+ do { \
+ call_frame_t *frame = get_call_frame_for_req(ctx); \
+ xlator_t *xl = (xlator_t *)ctx->active->top; \
+ frame->local = local; \
+ frame->root->state = gctx; \
+ STACK_WIND(frame, gluster_##op##_cbk, xl, xl->fops->op, args); \
+ stub = local->reply_stub; \
+ } while (0) \
+
+#define GLUSTER_REPLY_NOTIFY(local) \
+ do { \
+ pthread_mutex_lock(&local->lock); \
+ local->complete = 1; \
+ pthread_cond_broadcast(&local->reply_cond); \
+ pthread_mutex_unlock(&local->lock); \
+ } while (0) \
+
+/********** Name resolution ***********/
+static int32_t gluster_lookup_cbk(call_frame_t *frame, void *cookie,
+ xlator_t *this, int op_ret, int op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *dict, struct iatt *postparent)
+{
+ gluster_local_t *local = frame->local;
+ inode_t *link_inode;
+
+ if (op_ret) {
+ goto out;
+ }
+
+ /* TODO: Do this only for non-root inodes */
+ link_inode = inode_link(inode, local->loc->parent, local->loc->name, buf);
+ local->loc->inode = link_inode;
+out:
+ local->reply_stub = fop_lookup_cbk_stub(frame, NULL, op_ret, op_errno,
+ inode, buf, dict, postparent);
+
+ GLUSTER_REPLY_NOTIFY(local);
+ return 0;
+}
+
+/*
+ * Lookup @path under @parent. Return with inode reference held.
+ */
+static inode_t *gluster_lookup(inode_t *parent, inode_table_t *itable,
+ char *path)
+{
+ inode_t *inode = NULL;
+ loc_t loc;
+ gluster_local_t *local;
+ call_stub_t *stub;
+ int op_ret;
+
+ local = CALLOC(1, sizeof(*local));
+ if (!local) {
+ return NULL;
+ }
+
+ loc.path = gf_strdup(path);
+ loc.name = basename(path);
+ loc.inode = inode_new(itable);
+ loc.parent = parent; /* inode_ref ? */
+ uuid_copy(loc.pargfid, parent->gfid);
+ local->loc = &loc;
+ GLUSTER_FOP(gctx, stub, lookup, local, &loc, NULL);
+
+ op_ret = stub->args.lookup_cbk.op_ret;
+ errno = stub->args.lookup_cbk.op_errno;
+
+ if (op_ret == -1) {
+ goto out;
+ }
+
+ inode = stub->args.lookup_cbk.inode;
+ if (inode != loc.inode) {
+ inode_unref(loc.inode);
+ (void)inode_ref(inode);
+ } else {
+ inode = loc.inode;
+ }
+out:
+ /* inode_unref(parent); */
+ call_stub_destroy(stub);
+ GF_FREE((void *)loc.path);
+ return inode;
+}
+
+static int gluster_path_resolve_hard(loc_t *loc, int lookup_basename)
+{
+ inode_t *parent, *inode, *curr;
+ inode_table_t *itable;
+ char *pathname, *basename, *token, *next_token, *strtokptr;
+ int ret = 0;
+
+ basename = gf_strdup(loc->path);
+ if (!basename) {
+ ret = -1;
+ goto out;
+ }
+
+ if (!lookup_basename) {
+ pathname = dirname(basename);
+ } else {
+ pathname = basename;
+ }
+
+ itable = ((xlator_t *)gctx->active->top)->itable;
+ parent = inode_from_path(itable, "/");
+ if (!parent) {
+ parent = gluster_lookup(parent, itable, "/");
+ if (!parent) {
+ ret = -1;
+ goto out;
+ }
+ }
+
+ token = strtok_r(pathname, "/", &strtokptr);
+ if (!token) {
+ /* root inode */
+ loc->inode = parent;
+ loc->parent = NULL;
+ ret = 0;
+ goto out;
+ }
+
+ while (token) {
+ curr = inode_grep(itable, parent, token);
+ if (!curr) {
+ loc->parent = parent;
+ inode = gluster_lookup(parent, itable, token);
+ if (!inode) {
+ ret = -1;
+ goto out;
+ }
+ loc->inode = inode;
+ curr = inode;
+ }
+ next_token = strtok_r(NULL, "/", &strtokptr);
+ if (next_token) {
+ inode_unref(parent);
+ parent = curr;
+ curr = NULL;
+ } else {
+ inode = curr;
+ loc->parent = parent;
+ loc->inode = inode;
+ }
+ token = next_token;
+ }
+out:
+ if (!ret && !lookup_basename) {
+ if (loc->parent) {
+ inode_unref(loc->parent);
+ }
+ if (loc->inode) {
+ loc->parent = loc->inode;
+ loc->inode = NULL;
+ }
+ }
+ if (basename) {
+ GF_FREE(basename);
+ }
+ return ret;
+}
+
+/*
+ * Resolve loc->path to loc->parent and loc->inode.
+ */
+static int gluster_path_resolve(loc_t *loc, int lookup_basename)
+{
+ inode_t *parent, *inode = NULL;
+ inode_table_t *itable;
+ char *pathname, *dir;
+ int ret = 0;
+
+ pathname = gf_strdup(loc->path);
+ if (!pathname) {
+ ret = -1;
+ goto out;
+ }
+
+ dir = dirname(pathname);
+ itable = ((xlator_t *)gctx->active->top)->itable;
+
+ /* Check if the inode exists */
+ parent = inode_from_path(itable, dir);
+ if (parent) {
+ loc->parent = parent;
+ if (!lookup_basename) {
+ /* Got parent */
+ ret = 0;
+ goto out;
+ } else {
+ inode = inode_from_path(itable, loc->path);
+ if (inode) {
+ /* Got the complete path */
+ loc->inode = inode;
+ ret = 0;
+ goto out;
+ }
+ }
+ }
+
+ if (loc->parent) {
+ inode_unref(loc->parent);
+ loc->parent = NULL;
+ } else if (inode) {
+ /* shouldn't happen */
+ inode_unref(inode);
+ }
+ ret = gluster_path_resolve_hard(loc, lookup_basename);
+out:
+ if (pathname) {
+ GF_FREE(pathname);
+ }
+ return ret;
+}
+
+/********** POSIX interfaces ***********/
+static int gluster_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iovec *vector,
+ int32_t count, struct iatt *stbuf, struct iobref *iobref)
+{
+ gluster_local_t *local = frame->local;
+ gluster_aiocb_t *gaiocb = local->gaiocb;
+ call_stub_t *stub = NULL;
+
+ local->reply_stub = fop_readv_cbk_stub(frame, NULL, op_ret,
+ op_errno, vector, count, stbuf, iobref, NULL);
+
+ stub = local->reply_stub;
+ if (!local->gaiocb) {
+ GLUSTER_REPLY_NOTIFY(local);
+ goto out;
+ } else {
+ op_ret = stub->args.readv_cbk.op_ret;
+ errno = stub->args.readv_cbk.op_errno;
+ count = stub->args.readv_cbk.count;
+ vector = stub->args.readv_cbk.vector;
+
+ if (op_ret > 0) {
+ int i = 0, op_ret = 0;
+ size_t size = gaiocb->size;
+ char *buf = gaiocb->buf;
+
+ while (size && (i < count)) {
+ int len = (size < vector[i].iov_len) ? size: vector[i].iov_len;
+ memcpy(buf, vector[i++].iov_base, len);
+ buf += len;
+ size -= len;
+ op_ret += len;
+ }
+ }
+
+ gaiocb->ret = op_ret;
+ if (gaiocb->completion_fn) {
+ gaiocb->completion_fn(gaiocb);
+ }
+
+ FREE(local);
+ frame->local = NULL;
+ STACK_DESTROY(frame->root);
+ call_stub_destroy(stub);
+ }
+out:
+ return op_ret;
+}
+
+int gluster_aio_readv(fd_t *fd, gluster_aiocb_t *gaiocb)
+{
+ int op_ret = 0;
+ call_stub_t *stub;
+ gluster_local_t *local;
+
+ local = CALLOC(1, sizeof(*local));
+ if (!local) {
+ return -1;
+ }
+
+ local->gaiocb = gaiocb;
+ GLUSTER_FOP_NO_WAIT(gctx, stub, readv, local, fd, gaiocb->size,
+ gaiocb->offset, 0, NULL);
+
+ return op_ret;
+}
+
+static int gluster_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *prebuf, struct iatt *postbuf)
+{
+ gluster_local_t *local = frame->local;
+ gluster_aiocb_t *gaiocb = local->gaiocb;
+ call_stub_t *stub = NULL;
+
+ local->reply_stub = fop_writev_cbk_stub(frame, NULL, op_ret, op_errno,
+ prebuf, postbuf, NULL);
+ stub = local->reply_stub;
+
+ if (!local->gaiocb) {
+ GLUSTER_REPLY_NOTIFY(local);
+ goto out;
+ } else {
+ gaiocb->ret = op_ret = stub->args.writev_cbk.op_ret;
+ errno = stub->args.writev_cbk.op_errno;
+
+ if (gaiocb->completion_fn) {
+ gaiocb->completion_fn(gaiocb);
+ }
+ if (local->iob) {
+ iobuf_unref(local->iob);
+ }
+ if (local->iobref) {
+ iobref_unref(local->iobref);
+ }
+
+ FREE(local);
+ frame->local = NULL;
+ STACK_DESTROY(frame->root);
+ call_stub_destroy(stub);
+ }
+out:
+ return op_ret;
+}
+
+int gluster_aio_writev(fd_t *fd, gluster_aiocb_t *gaiocb)
+{
+ int op_ret = 0;
+ struct iobref *iobref = NULL;
+ struct iobuf *iob = NULL;
+ struct iovec iov;
+ gluster_local_t *local;
+ call_stub_t *stub = NULL;
+
+ local = CALLOC(1, sizeof(*local));
+ if (!local) {
+ goto out;
+ }
+
+ local->gaiocb = gaiocb;
+ iobref = local->iobref = iobref_new();
+ if (!iobref) {
+ goto out;
+ }
+
+ iob = local->iob = iobuf_get(gctx->iobuf_pool);
+ if (!iob) {
+ goto out;
+ }
+
+ memcpy(iob->ptr, gaiocb->buf, gaiocb->size);
+ iobref_add(iobref, iob);
+ iov.iov_base = local->iob->ptr;
+ iov.iov_len = gaiocb->size;
+
+ GLUSTER_FOP_NO_WAIT(gctx, stub, writev, local, fd, &iov, 1,
+ gaiocb->offset, 0, iobref, NULL);
+out:
+ return op_ret;
+}
+
+static void gluster_loc_wipe(loc_t *loc)
+{
+ if (loc->path) {
+ GF_FREE((void *)loc->path);
+ }
+ if (loc->parent) {
+ inode_unref(loc->parent);
+ }
+ if (loc->inode) {
+ inode_unref(loc->inode);
+ }
+ loc->path = loc->name = NULL;
+}
+
+static int gluster_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent, struct iatt *postparent)
+{
+ gluster_local_t *local = frame->local;
+
+ local->reply_stub = fop_create_cbk_stub(frame, NULL, op_ret, op_errno,
+ fd, inode, buf, preparent, postparent, NULL);
+ GLUSTER_REPLY_NOTIFY(local);
+ return 0;
+}
+
+static int __gluster_do_create(loc_t *loc, int flags, int mode, fd_t *fd)
+{
+ call_stub_t *stub = NULL;
+ gluster_local_t *local;
+ dict_t *dict = NULL;
+ int ret = -1;
+ inode_t *inode;
+ uuid_t gfid;
+
+ local = CALLOC(1, sizeof(*local));
+ if (!local) {
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict) {
+ goto out;
+ }
+
+ uuid_generate(gfid);
+ ret = dict_set_static_bin(dict, "gfid-req", gfid, 16);
+ if (ret < 0) {
+ goto out;
+ }
+
+ GLUSTER_FOP(gctx, stub, create, local, loc, flags, mode, 0, fd, dict);
+ ret = stub->args.create_cbk.op_ret;
+ errno = stub->args.create_cbk.op_errno;
+ if (ret == -1) {
+ goto out;
+ }
+
+ inode = stub->args.create_cbk.inode;
+ inode_link(inode, loc->parent, loc->name, &stub->args.create_cbk.buf);
+ inode_lookup(inode);
+out:
+ call_stub_destroy(stub);
+ if (dict) {
+ dict_unref(dict);
+ }
+ return ret;
+}
+
+static int gluster_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd)
+{
+ gluster_local_t *local = frame->local;
+
+ local->reply_stub = fop_open_cbk_stub(frame, NULL, op_ret, op_errno, fd,
+ NULL);
+ GLUSTER_REPLY_NOTIFY(local);
+ return 0;
+}
+
+static int __gluster_do_open(loc_t *loc, int flags, fd_t *fd)
+{
+ call_stub_t *stub = NULL;
+ gluster_local_t *local;
+ int ret = -1;
+
+ local = CALLOC(1, sizeof(*local));
+ if (!local) {
+ goto out;
+ }
+
+ GLUSTER_FOP(gctx, stub, open, local, loc, flags, fd, 0);
+ ret = stub->args.open_cbk.op_ret;
+ errno = stub->args.open_cbk.op_errno;
+out:
+ call_stub_destroy(stub);
+ return ret;
+}
+
+static gluster_file_t gluster_do_open(const char *path, int flags, int mode)
+{
+ int ret = 0;
+ loc_t loc = {0, };
+ fd_t *fd = NULL;
+ int create = ((flags & O_CREAT) == O_CREAT) ? 1: 0;
+ char *pathname = NULL;
+
+ loc.path = gf_strdup(path);
+ ret = gluster_path_resolve(&loc, 1);
+ if (ret == -1 && !create) {
+ goto out;
+ }
+
+ if (!ret && create && ((flags & O_EXCL) == O_EXCL)) {
+ /* EEXIST */
+ ret = -1;
+ goto out;
+ }
+
+ if (ret == -1 && create) {
+ inode_table_t *itable = ((xlator_t *)gctx->active->top)->itable;
+
+ gluster_loc_wipe(&loc);
+
+ /* lookup parent */
+ loc.path = gf_strdup(path);
+ ret = gluster_path_resolve(&loc, 0);
+ if (ret == -1) {
+ goto out;
+ }
+
+ /* alloc new inode for child */
+ loc.inode = inode_new(itable);
+ }
+
+ pathname = gf_strdup(path);
+ loc.name = basename(pathname);
+
+ fd = fd_create(loc.inode, getpid());
+ if (!fd) {
+ goto out;
+ }
+ fd->flags = flags;
+
+ if (!create && loc.inode && !uuid_is_null(loc.inode->gfid)) {
+ uuid_copy(loc.gfid, loc.inode->gfid);
+ }
+ if (loc.parent && !uuid_is_null(loc.parent->gfid)) {
+ uuid_copy(loc.pargfid, loc.parent->gfid);
+ }
+
+ if (create) {
+ ret = __gluster_do_create(&loc, flags, mode, fd);
+ } else {
+ ret = __gluster_do_open(&loc, flags, fd);
+ }
+
+ if (ret == -1) {
+ fd_unref(fd);
+ fd = NULL;
+ }
+out:
+ gluster_loc_wipe(&loc);
+ if (pathname)
+ GF_FREE(pathname);
+ return (gluster_file_t)fd;
+}
+
+gluster_file_t gluster_open(const char *path, int flags, int mode)
+{
+ return gluster_do_open(path, flags, mode);
+}
+
+gluster_file_t gluster_creat(const char *path, int mode)
+{
+ return gluster_do_open(path, (O_CREAT | O_WRONLY | O_TRUNC), mode);
+}
+
+static int gluster_fstat_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf)
+{
+ gluster_local_t *local = frame->local;
+
+ local->reply_stub = fop_fstat_cbk_stub(frame, NULL, op_ret, op_errno, buf,
+ NULL);
+ GLUSTER_REPLY_NOTIFY(local);
+ return 0;
+}
+
+int gluster_fstat(gluster_file_t fd, struct stat *buf)
+{
+ int op_ret = -1;
+ gluster_local_t *local;
+ call_stub_t *stub = NULL;
+
+ local = CALLOC(1, sizeof(*local));
+ if (!local) {
+ goto out;
+ }
+
+ GLUSTER_FOP(gctx, stub, fstat, local, fd, NULL);
+ op_ret = stub->args.fstat_cbk.op_ret;
+ errno = stub->args.fstat_cbk.op_errno;
+
+ if (!op_ret) {
+ iatt_to_stat(&stub->args.fstat_cbk.buf, buf);
+ }
+out:
+ call_stub_destroy(stub);
+ return op_ret;
+}
+
+static int gluster_flush_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gluster_local_t *local = frame->local;
+
+ local->reply_stub = fop_flush_cbk_stub(frame, NULL, op_ret, op_errno, NULL);
+ GLUSTER_REPLY_NOTIFY(local);
+ return 0;
+}
+
+int gluster_close(gluster_file_t fd)
+{
+ int op_ret = -1;
+ gluster_local_t *local;
+ call_stub_t *stub = NULL;
+
+ if (!fd) {
+ goto out;
+ }
+
+ local = CALLOC(1, sizeof(*local));
+ if (!local) {
+ goto out;
+ }
+
+ GLUSTER_FOP(gctx, stub, flush, local, fd, NULL);
+ op_ret = stub->args.flush_cbk.op_ret;
+ errno = stub->args.flush_cbk.op_errno;
+
+out:
+ call_stub_destroy(stub);
+ return op_ret;
+}
+
+static int gluster_ftruncate_cbk(call_frame_t *frame, void *cookie,
+ xlator_t *xlator, int32_t op_ret, int32_t op_errno,
+ struct iatt *prebuf, struct iatt *postbuf)
+{
+ gluster_local_t *local = frame->local;
+
+ local->reply_stub = fop_ftruncate_cbk_stub(frame, NULL, op_ret, op_errno,
+ prebuf, postbuf, NULL);
+ GLUSTER_REPLY_NOTIFY(local);
+ return 0;
+}
+
+int gluster_ftruncate(gluster_file_t fd, off_t length)
+{
+ int op_ret = -1;
+ gluster_local_t *local;
+ call_stub_t *stub = NULL;
+
+ if (!fd) {
+ goto out;
+ }
+
+ local = CALLOC(1, sizeof(*local));
+ if (!local) {
+ goto out;
+ }
+
+ GLUSTER_FOP(gctx, stub, ftruncate, local, fd, length, NULL);
+ op_ret = stub->args.ftruncate_cbk.op_ret;
+ errno = stub->args.ftruncate_cbk.op_errno;
+
+out:
+ call_stub_destroy(stub);
+ return op_ret;
+}
+
+/********** Glusterfs initialization ******/
+struct xlator_fops gluster_master_fops = { };
+struct xlator_cbks gluster_master_cbks = { };
+
+static int gluster_master_init(xlator_t *this)
+{
+ return 0;
+}
+
+static void gluster_master_fini(xlator_t *this)
+{
+ return;
+}
+
+static int gluster_master_notify(xlator_t *this, int32_t event, void *data, ...)
+{
+ glusterfs_graph_t *graph = data;
+ inode_table_t *itable;
+
+ switch (event) {
+ case GF_EVENT_GRAPH_NEW:
+ /* This should ideally be under GF_EVENT_CHILD_UP */
+ if (!graph) {
+ break;
+ }
+ graph->used = 1;
+ itable = inode_table_new(0, graph->top);
+ if (!itable) {
+ return -1;
+ }
+ ((xlator_t *)graph->top)->itable = itable;
+ break;
+ case GF_EVENT_CHILD_UP:
+ case GF_EVENT_CHILD_DOWN:
+ case GF_EVENT_CHILD_CONNECTING:
+ break;
+ case GF_EVENT_AUTH_FAILED:
+ gluster_master_fini(this);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Master translator that is the equivalent of FUSE.
+ * Can't do xlator_dyn_load() since we don't have a .so for this - need
+ * to populate the master xlator manually.
+ */
+static int gluster_master_setup(glusterfs_ctx_t *ctx)
+{
+ xlator_t *master;
+
+ master = CALLOC(1, sizeof(*master));
+ if (!master) {
+ goto out;
+ }
+
+ master->type = gf_strdup("gluster/master");
+ master->name = gf_strdup("gluster-master");
+ master->init = gluster_master_init;
+ master->fops = &gluster_master_fops;
+ master->cbks = &gluster_master_cbks;
+ master->notify = gluster_master_notify;
+ master->fini = gluster_master_fini;
+ INIT_LIST_HEAD(&master->volume_options);
+
+ master->ctx = ctx;
+ if (xlator_init(master)) {
+ goto out;
+ }
+ ctx->master = master;
+ return 0;
+out:
+ if (master) {
+ FREE(master);
+ }
+ return -1;
+}
+
+static void gluster_ctx_destroy(void)
+{
+ call_pool_t *pool;
+
+ if (!gctx) {
+ return;
+ }
+ if (gctx->iobuf_pool) {
+ iobuf_pool_destroy(gctx->iobuf_pool);
+ }
+ if (gctx->dict_pool) {
+ mem_pool_destroy(gctx->dict_pool);
+ }
+ if (gctx->dict_pair_pool) {
+ mem_pool_destroy(gctx->dict_pair_pool);
+ }
+ if (gctx->dict_data_pool) {
+ mem_pool_destroy(gctx->dict_data_pool);
+ }
+ if (gctx->stub_mem_pool) {
+ mem_pool_destroy(gctx->stub_mem_pool);
+ }
+
+ pool = gctx->pool;
+ if (pool) {
+ if (pool->frame_mem_pool) {
+ mem_pool_destroy(pool->frame_mem_pool);
+ }
+ if (pool->stack_mem_pool) {
+ mem_pool_destroy(pool->stack_mem_pool);
+ }
+ GF_FREE(pool);
+ }
+}
+
+static int gluster_ctx_init(void)
+{
+ int ret = 0;
+ call_pool_t *pool = NULL;
+ cmd_args_t *cmd_args = NULL;
+
+ gctx = CALLOC(1, sizeof(*gctx));
+ if (!gctx) {
+ ret = -1;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&gctx->graphs);
+ INIT_LIST_HEAD(&gctx->mempool_list);
+ ret = pthread_mutex_init(&gctx->lock, NULL);
+ if (ret) {
+ goto out;
+ }
+
+ THIS->ctx = gctx;
+
+ ret = -1;
+ gctx->page_size = 128 * GF_UNIT_KB;
+ gctx->iobuf_pool = iobuf_pool_new();
+ if (!gctx->iobuf_pool) {
+ goto out;
+ }
+
+ gctx->event_pool = event_pool_new(16384);
+ if (!gctx->event_pool) {
+ goto out;
+ }
+
+ gctx->dict_pool = mem_pool_new(dict_t, 1024);
+ if (!gctx->dict_pool) {
+ goto out;
+ }
+
+ gctx->dict_pair_pool = mem_pool_new(data_pair_t, 16*GF_UNIT_KB);
+ if (!gctx->dict_pair_pool) {
+ goto out;
+ }
+
+ gctx->dict_data_pool = mem_pool_new(data_t, 8*GF_UNIT_KB);
+ if (!gctx->dict_data_pool) {
+ goto out;
+ }
+
+ pool = CALLOC(1, sizeof(call_pool_t));
+ if (!pool) {
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&pool->all_frames);
+ LOCK_INIT(&pool->lock);
+ gctx->pool = pool;
+
+ pool->frame_mem_pool = mem_pool_new(call_frame_t, 4096);
+ if (!pool->frame_mem_pool) {
+ goto out;
+ }
+
+ pool->stack_mem_pool = mem_pool_new(call_stack_t, 1024);
+ if (!pool->stack_mem_pool) {
+ goto out;
+ }
+
+ gctx->stub_mem_pool = mem_pool_new(call_stub_t, 1024);
+ if (!gctx->stub_mem_pool) {
+ goto out;
+ }
+
+ cmd_args = &gctx->cmd_args;
+ INIT_LIST_HEAD(&cmd_args->xlator_options);
+ return 0;
+
+out:
+ gluster_ctx_destroy();
+ return ret;
+}
+
+static int gluster_graph_init(char *volfile)
+{
+ FILE *fp = NULL;
+ glusterfs_graph_t *graph = NULL;
+ int ret = -1;
+
+ fp = fopen(volfile, "r");
+ if (!fp) {
+ ret = -errno;
+ goto out;
+ }
+
+ graph = glusterfs_graph_construct(fp);
+ if (!graph) {
+ goto out;
+ }
+
+ ret = glusterfs_graph_prepare(graph, gctx);
+ if (ret) {
+ goto out;
+ }
+
+ ret = glusterfs_graph_activate(graph, gctx);
+ if (ret) {
+ goto out;
+ }
+out:
+ if (fp) {
+ fclose(fp);
+ }
+ if (graph && ret) {
+ glusterfs_graph_destroy(graph);
+ gctx->active = NULL;
+ }
+ return ret;
+}
+
+static int gluster_logging_init(char *logfile)
+{
+ if (gf_log_init(logfile) == -1) {
+ return -1;
+ }
+ gf_log_set_loglevel(GF_LOG_INFO);
+ return 0;
+}
+
+static void *gluster_handle_poll(void *arg)
+{
+ glusterfs_ctx_t *ctx = arg;
+
+ event_dispatch(ctx->event_pool);
+ return NULL;
+}
+
+glusterfs_ctx_t *gluster_init(char *volfile)
+{
+ int ret = 0;
+ pthread_t thread;
+
+ ret = glusterfs_this_init();
+ if (ret) {
+ goto out;
+ }
+
+ ret = gluster_ctx_init();
+ if (ret) {
+ goto out;
+ }
+
+ ret = glusterfs_uuid_buf_init();
+ if (ret) {
+ goto out;
+ }
+
+ ret = glusterfs_lkowner_buf_init();
+ if (ret) {
+ goto out;
+ }
+
+ /* FIX: Without an explicit log file, the log goes to the console! */
+ ret = gluster_logging_init("/tmp/qemu-gluster.log");
+ if (ret) {
+ goto out;
+ }
+
+ ret = gluster_master_setup(gctx);
+ if (ret) {
+ goto out;
+ }
+
+ ret = gluster_graph_init(volfile);
+ if (ret) {
+ goto out;
+ }
+
+ ret = pthread_create(&thread, NULL, gluster_handle_poll,
+ (void *)gctx);
+ if (ret) {
+ goto out;
+ }
+ return gctx;
+
+out:
+ if (gctx) {
+ if (gctx->master) {
+ xlator_destroy(gctx->master);
+ }
+ if (gctx->active) {
+ glusterfs_graph_destroy(gctx->active);
+ }
+ }
+ gluster_ctx_destroy();
+ return NULL;
+}
diff --git a/block/gluster-helpers.h b/block/gluster-helpers.h
new file mode 100644
index 0000000..612f0f2
--- /dev/null
+++ b/block/gluster-helpers.h
@@ -0,0 +1,40 @@
+/*
+ * Helper routines for GlusterFS backend
+ * (Based on libglusterfsclient)
+ *
+ * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/common-utils.h>
+#include <glusterfs/iobuf.h>
+#include <glusterfs/stack.h>
+#include <glusterfs/event.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/mem-pool.h>
+#include <glusterfs/globals.h>
+#include <glusterfs/fd.h>
+
+#include <libgen.h>
+
+typedef void * gluster_file_t;
+
+typedef struct {
+ char *buf;
+ size_t size;
+ off_t offset;
+ int ret;
+ void (*completion_fn)(void *arg);
+ void *opaque;
+} gluster_aiocb_t;
+
+extern glusterfs_ctx_t *gluster_init(char *volfile);
+extern int gluster_aio_readv(fd_t *fd, gluster_aiocb_t *gaiocb);
+extern int gluster_aio_writev(fd_t *fd, gluster_aiocb_t *gaiocb);
+extern gluster_file_t gluster_open(const char *path, int flags, int mode);
+extern int gluster_fstat(gluster_file_t fd, struct stat *buf);
+extern int gluster_close(gluster_file_t fd);
+extern gluster_file_t gluster_creat(const char *path, int mode);
+extern int gluster_ftruncate(gluster_file_t fd, off_t length);
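
To make the asynchronous contract above concrete, here is a hedged
sketch (not part of the patch) of submitting a read. completion_fn is
invoked from a glusterfs thread with the gluster_aiocb_t itself as the
argument (see gluster_readv_cbk() above), and ret then holds the number
of bytes read, or -1 with errno set:

    /* minimal sketch under the assumptions stated above */
    static void read_done(void *arg)
    {
        gluster_aiocb_t *gaiocb = arg;

        /* gaiocb->ret has the result; gaiocb->opaque carries the
         * submitter's own state across the callback */
        FREE(gaiocb);
    }

    static int submit_read(fd_t *fd, char *buf, size_t size, off_t offset)
    {
        gluster_aiocb_t *gaiocb = CALLOC(1, sizeof(*gaiocb));

        if (!gaiocb) {
            return -1;
        }
        gaiocb->buf = buf;
        gaiocb->size = size;
        gaiocb->offset = offset;
        gaiocb->completion_fn = read_done;
        return gluster_aio_readv(fd, gaiocb);
    }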
* [Qemu-devel] [RFC PATCH 3/3] block: gluster as block backend
From: Bharata B Rao @ 2012-06-11 14:21 UTC (permalink / raw)
To: qemu-devel; +Cc: Amar Tumballi, Vijay Bellur
block: gluster as block backend
From: Bharata B Rao <bharata@linux.vnet.ibm.com>
This patch adds gluster as a new block backend in QEMU. This gives QEMU
the ability to boot VM images from gluster volumes.
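
Images are addressed as file=gluster:<volfile>:<image> and split by
qemu_gluster_parsename() below. A standalone sketch of that split, with
hypothetical paths:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* mirrors the strtok_r()-based split in qemu_gluster_parsename();
         * both paths are made up for illustration */
        char file[] = "gluster:/etc/glusterfs/qemu.vol:images/vm01.img";
        char *saveptr;
        char *proto   = strtok_r(file, ":", &saveptr);  /* "gluster" (discarded) */
        char *volfile = strtok_r(NULL, ":", &saveptr);  /* "/etc/glusterfs/qemu.vol" */
        char *image   = strtok_r(NULL, ":", &saveptr);  /* "images/vm01.img" */

        printf("proto=%s volfile=%s image=%s\n", proto, volfile, image);
        return 0;
    }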
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
---
Makefile.objs | 2
block/gluster.c | 435 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 436 insertions(+), 1 deletions(-)
create mode 100644 block/gluster.c
diff --git a/Makefile.objs b/Makefile.objs
index 25190ba..859b88a 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -59,7 +59,7 @@ block-nested-$(CONFIG_POSIX) += raw-posix.o
block-nested-$(CONFIG_LIBISCSI) += iscsi.o
block-nested-$(CONFIG_CURL) += curl.o
block-nested-$(CONFIG_RBD) += rbd.o
-block-nested-$(CONFIG_GLUSTERFS) += gluster-helpers.o
+block-nested-$(CONFIG_GLUSTERFS) += gluster-helpers.o gluster.o
block-obj-y += $(addprefix block/, $(block-nested-y))
diff --git a/block/gluster.c b/block/gluster.c
new file mode 100644
index 0000000..1566cb7
--- /dev/null
+++ b/block/gluster.c
@@ -0,0 +1,435 @@
+/*
+ * GlusterFS backend for QEMU
+ *
+ * (AIO implementation is derived from block/rbd.c)
+ *
+ * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+#include "block_int.h"
+#include "gluster-helpers.h"
+
+/* gluster_file_t is defined in gluster-helpers.h */
+
+typedef struct glusterConf {
+ char volfile[PATH_MAX];
+ char image[PATH_MAX];
+} glusterConf;
+
+typedef struct BDRVGlusterState {
+ int fds[2];
+ int open_flags;
+ gluster_file_t fd;
+ glusterfs_ctx_t *ctx;
+ int qemu_aio_count;
+ int event_reader_pos;
+ gluster_aiocb_t *event_gaiocb;
+} BDRVGlusterState;
+
+typedef struct glusterAIOCB {
+ BlockDriverAIOCB common;
+ QEMUBH *bh;
+ QEMUIOVector *qiov;
+ int ret;
+ int write;
+ char *bounce;
+ BDRVGlusterState *s;
+ int cancelled;
+ int error;
+} glusterAIOCB;
+
+#define GLUSTER_FD_READ 0
+#define GLUSTER_FD_WRITE 1
+
+/*
+ * file=protocol:volfile:image
+ */
+static int qemu_gluster_parsename(glusterConf *c, const char *filename)
+{
+ char *file = g_strdup(filename);
+ char *token, *next_token, *saveptr;
+ int ret = 0;
+
+ /* Discard the protocol */
+ token = strtok_r(file, ":", &saveptr);
+ if (!token) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* volfile */
+ next_token = strtok_r(NULL, ":", &saveptr);
+ if (!next_token) {
+ ret = -EINVAL;
+ goto out;
+ }
+ strncpy(c->volfile, next_token, PATH_MAX);
+
+ /* image */
+ next_token = strtok_r(NULL, ":", &saveptr);
+ if (!next_token) {
+ ret = -EINVAL;
+ goto out;
+ }
+ strncpy(c->image, next_token, PATH_MAX);
+out:
+ g_free(file);
+ return ret;
+}
+
+static void gluster_aio_bh_cb(void *opaque)
+{
+ glusterAIOCB *acb = opaque;
+
+ if (!acb->write) {
+ qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
+ }
+ qemu_vfree(acb->bounce);
+ acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
+ qemu_bh_delete(acb->bh);
+ acb->bh = NULL;
+
+ qemu_aio_release(acb);
+}
+
+static void qemu_gluster_complete_aio(gluster_aiocb_t *gaiocb)
+{
+ glusterAIOCB *acb = (glusterAIOCB *)gaiocb->opaque;
+ int64_t r;
+
+ if (acb->cancelled) {
+ qemu_vfree(acb->bounce);
+ qemu_aio_release(acb);
+ goto done;
+ }
+
+ r = gaiocb->ret;
+
+ if (acb->write) {
+ if (r < 0) {
+ acb->ret = r;
+ acb->error = 1;
+ } else if (!acb->error) {
+ acb->ret = gaiocb->size;
+ }
+ } else {
+ if (r < 0) {
+ memset(gaiocb->buf, 0, gaiocb->size);
+ acb->ret = r;
+ acb->error = 1;
+ } else if (r < gaiocb->size) {
+ memset(gaiocb->buf + r, 0, gaiocb->size - r);
+ if (!acb->error) {
+ acb->ret = gaiocb->size;
+ }
+ } else if (!acb->error) {
+ acb->ret = r;
+ }
+ }
+ acb->bh = qemu_bh_new(gluster_aio_bh_cb, acb);
+ qemu_bh_schedule(acb->bh);
+done:
+ g_free(gaiocb);
+}
+
+static void qemu_gluster_aio_event_reader(void *opaque)
+{
+ BDRVGlusterState *s = opaque;
+ ssize_t ret;
+
+ do {
+ char *p = (char *)&s->event_gaiocb;
+
+ ret = read(s->fds[GLUSTER_FD_READ], p + s->event_reader_pos,
+ sizeof(s->event_gaiocb) - s->event_reader_pos);
+ if (ret > 0) {
+ s->event_reader_pos += ret;
+ if (s->event_reader_pos == sizeof(s->event_gaiocb)) {
+ s->event_reader_pos = 0;
+ qemu_gluster_complete_aio(s->event_gaiocb);
+ s->qemu_aio_count--;
+ }
+ }
+ } while (ret < 0 && errno == EINTR);
+}
+
+static int qemu_gluster_aio_flush_cb(void *opaque)
+{
+ BDRVGlusterState *s = opaque;
+
+ return (s->qemu_aio_count > 0);
+}
+
+static int qemu_gluster_open(BlockDriverState *bs, const char *filename,
+ int bdrv_flags)
+{
+ BDRVGlusterState *s = bs->opaque;
+ glusterConf *c = g_malloc(sizeof(glusterConf));
+ int ret = -1;
+
+ if (qemu_gluster_parsename(c, filename)) {
+ goto out;
+ }
+
+ s->ctx = gluster_init(c->volfile);
+ if (!s->ctx) {
+ goto out;
+ }
+
+ /* FIX: Server client handshake takes time */
+ sleep(1);
+
+ s->open_flags |= O_BINARY;
+ s->open_flags &= ~O_ACCMODE;
+ if (bdrv_flags & BDRV_O_RDWR) {
+ s->open_flags |= O_RDWR;
+ } else {
+ s->open_flags |= O_RDONLY;
+ }
+
+ /* Use O_DSYNC for write-through caching, no flags for write-back caching,
+ * and O_DIRECT for no caching. */
+ if ((bdrv_flags & BDRV_O_NOCACHE))
+ s->open_flags |= O_DIRECT;
+ if (!(bdrv_flags & BDRV_O_CACHE_WB))
+ s->open_flags |= O_DSYNC;
+
+ s->fd = gluster_open(c->image, s->open_flags, 0);
+ if (!s->fd) {
+ goto out;
+ }
+
+ ret = qemu_pipe(s->fds);
+ if (ret < 0) {
+ goto out;
+ }
+ fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
+ fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
+ qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ],
+ qemu_gluster_aio_event_reader, NULL, qemu_gluster_aio_flush_cb, s);
+out:
+ if (c) {
+ g_free(c);
+ }
+ if (ret < 0) {
+ gluster_close(s->fd);
+ }
+ return ret;
+}
+
+static int qemu_gluster_create(const char *filename,
+ QEMUOptionParameter *options)
+{
+ glusterConf *c = g_malloc(sizeof(glusterConf));
+ int ret = 0;
+ gluster_file_t fd;
+ int64_t total_size = 0;
+
+ ret = qemu_gluster_parsename(c, filename);
+ if (ret) {
+ goto out;
+ }
+
+ if (!gluster_init(c->volfile)) {
+ ret = -1;
+ goto out;
+ }
+
+ /* FIX: Server client handshake takes time */
+ sleep(1);
+
+ /* Read out options */
+ while (options && options->name) {
+ if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
+ total_size = options->value.n / BDRV_SECTOR_SIZE;
+ }
+ options++;
+ }
+
+ fd = gluster_creat(c->image, 0644);
+ if (!fd) {
+ ret = -errno;
+ } else {
+ if (gluster_ftruncate(fd, total_size * BDRV_SECTOR_SIZE) != 0) {
+ ret = -errno;
+ }
+ if (gluster_close(fd) != 0) {
+ ret = -errno;
+ }
+ }
+out:
+ if (c) {
+ g_free(c);
+ }
+ return ret;
+}
+
+static AIOPool gluster_aio_pool = {
+ .aiocb_size = sizeof(glusterAIOCB),
+};
+
+static int qemu_gluster_send_pipe(BDRVGlusterState *s, gluster_aiocb_t *gaiocb)
+{
+ int ret = 0;
+ while (1) {
+ fd_set wfd;
+ int fd = s->fds[GLUSTER_FD_WRITE];
+
+ ret = write(fd, (void *)&gaiocb, sizeof(gaiocb));
+ if (ret >= 0) {
+ break;
+ }
+ if (errno == EINTR) {
+ continue;
+ }
+ if (errno != EAGAIN) {
+ break;
+ }
+
+ FD_ZERO(&wfd);
+ FD_SET(fd, &wfd);
+ do {
+ ret = select(fd + 1, NULL, &wfd, NULL, NULL);
+ } while (ret < 0 && errno == EINTR);
+ }
+ return ret;
+}
+
+static void gluster_finish_aiocb(void *arg)
+{
+ int ret;
+ gluster_aiocb_t *gaiocb = (gluster_aiocb_t *)arg;
+ BDRVGlusterState *s = ((glusterAIOCB *)gaiocb->opaque)->s;
+
+ ret = qemu_gluster_send_pipe(s, gaiocb);
+ if (ret < 0) {
+ g_free(gaiocb);
+ }
+}
+
+static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
+ int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque, int write)
+{
+ int ret;
+ glusterAIOCB *acb;
+ gluster_aiocb_t *gaiocb;
+ BDRVGlusterState *s = bs->opaque;
+ char *buf;
+ size_t size;
+ off_t offset;
+
+ acb = qemu_aio_get(&gluster_aio_pool, bs, cb, opaque);
+ acb->write = write;
+ acb->qiov = qiov;
+ acb->bounce = qemu_blockalign(bs, qiov->size);
+ acb->ret = 0;
+ acb->bh = NULL;
+ acb->s = s;
+
+ if (write) {
+ qemu_iovec_to_buffer(acb->qiov, acb->bounce);
+ }
+
+ buf = acb->bounce;
+ offset = sector_num * BDRV_SECTOR_SIZE;
+ size = nb_sectors * BDRV_SECTOR_SIZE;
+ s->qemu_aio_count++;
+
+ gaiocb = g_malloc(sizeof(gluster_aiocb_t));
+ gaiocb->opaque = acb;
+ gaiocb->buf = buf;
+ gaiocb->offset = offset;
+ gaiocb->size = size;
+ gaiocb->completion_fn = &gluster_finish_aiocb;
+
+ if (write) {
+ ret = gluster_aio_writev(s->fd, gaiocb);
+ } else {
+ ret = gluster_aio_readv(s->fd, gaiocb);
+ }
+
+ if (ret < 0) {
+ goto out;
+ }
+ return &acb->common;
+
+out:
+ g_free(gaiocb);
+ s->qemu_aio_count--;
+ qemu_aio_release(acb);
+ return NULL;
+}
+
+static BlockDriverAIOCB *qemu_gluster_aio_readv(BlockDriverState *bs,
+ int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque)
+{
+ return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
+}
+
+static BlockDriverAIOCB *qemu_gluster_aio_writev(BlockDriverState *bs,
+ int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque)
+{
+ return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
+}
+
+static int64_t qemu_gluster_getlength(BlockDriverState *bs)
+{
+ BDRVGlusterState *s = bs->opaque;
+ gluster_file_t fd = s->fd;
+ struct stat st;
+ int ret;
+
+ ret = gluster_fstat(fd, &st);
+ if (ret < 0) {
+ return -1;
+ } else {
+ return st.st_size;
+ }
+}
+
+static void qemu_gluster_close(BlockDriverState *bs)
+{
+ BDRVGlusterState *s = bs->opaque;
+
+ if (s->fd) {
+ gluster_close(s->fd);
+ s->fd = NULL;
+ }
+}
+
+static QEMUOptionParameter qemu_gluster_create_options[] = {
+ {
+ .name = BLOCK_OPT_SIZE,
+ .type = OPT_SIZE,
+ .help = "Virtual disk size"
+ },
+ { NULL }
+};
+
+static BlockDriver bdrv_gluster = {
+ .format_name = "gluster",
+ .protocol_name = "gluster",
+ .instance_size = sizeof(BDRVGlusterState),
+ .bdrv_file_open = qemu_gluster_open,
+ .bdrv_close = qemu_gluster_close,
+ .bdrv_create = qemu_gluster_create,
+ .bdrv_getlength = qemu_gluster_getlength,
+
+ .bdrv_aio_readv = qemu_gluster_aio_readv,
+ .bdrv_aio_writev = qemu_gluster_aio_writev,
+
+ .create_options = qemu_gluster_create_options,
+};
+
+static void bdrv_gluster_init(void)
+{
+ bdrv_register(&bdrv_gluster);
+}
+
+block_init(bdrv_gluster_init);
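
A note on the completion path above: gluster_finish_aiocb() runs in a
glusterfs thread and hands the request back to QEMU's event loop by
writing the raw gluster_aiocb_t pointer into a pipe, which
qemu_gluster_aio_event_reader() then reads and completes. A
self-contained sketch of that pointer-through-pipe hand-off
(hypothetical names; it only works because both ends live in the same
address space):

    #include <stdio.h>
    #include <unistd.h>

    typedef struct {
        int ret;
    } demo_aiocb;

    int main(void)
    {
        int fds[2];
        demo_aiocb acb = { .ret = 4096 };
        demo_aiocb *sent = &acb, *received = NULL;

        if (pipe(fds) < 0) {
            return 1;
        }
        /* producer side: the glusterfs callback thread in the patch */
        if (write(fds[1], &sent, sizeof(sent)) != sizeof(sent)) {
            return 1;
        }
        /* consumer side: QEMU's aio fd handler in the patch */
        if (read(fds[0], &received, sizeof(received)) != sizeof(received)) {
            return 1;
        }
        printf("completed with ret=%d\n", received->ret);
        close(fds[0]);
        close(fds[1]);
        return 0;
    }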