From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
	MORITA Kazutaka <morita.kazutaka@lab.ntt.co.jp>,
	Stefan Hajnoczi <stefanha@redhat.com>
Subject: [Qemu-devel] [PULL 20/42] sheepdog: implement .bdrv_detach/attach_aio_context()
Date: Fri,  6 Jun 2014 18:13:41 +0200	[thread overview]
Message-ID: <1402071243-16702-21-git-send-email-stefanha@redhat.com> (raw)
In-Reply-To: <1402071243-16702-1-git-send-email-stefanha@redhat.com>

Drop the assumption that we're using the main AioContext.  Convert
qemu_aio_set_fd_handler() to aio_set_fd_handler() and qemu_aio_wait() to
aio_poll().
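
For context, a minimal sketch of the replacement pattern (not part of the
patch; wait_until() is a made-up name for illustration):

    #include "block/aio.h"

    /* Block until *done becomes true, dispatching only the handlers that
     * are registered on ctx rather than on the global AioContext. */
    static void wait_until(AioContext *ctx, bool *done)
    {
        while (!*done) {
            aio_poll(ctx, true);    /* true = block until an event fires */
        }
    }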

The .bdrv_detach/attach_aio_context() interfaces also need to be
implemented so that the socket fd handler can be moved from the old
AioContext to the new one.
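
Roughly, the block layer invokes these hooks from bdrv_set_aio_context()
(added earlier in this series): it drains in-flight requests, detaches the
driver from its old context, then attaches it under the new one.  A
simplified sketch of that calling contract, not the literal core code:

    bdrv_drain_all();                         /* no requests in flight */
    bdrv_detach_aio_context(bs);              /* driver drops its fd handlers */
    aio_context_acquire(new_context);
    bdrv_attach_aio_context(bs, new_context); /* driver re-registers them */
    aio_context_release(new_context);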

Cc: MORITA Kazutaka <morita.kazutaka@lab.ntt.co.jp>
Acked-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/sheepdog.c | 118 +++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 80 insertions(+), 38 deletions(-)

diff --git a/block/sheepdog.c b/block/sheepdog.c
index 4ecbf5f..9175cc2 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -314,6 +314,7 @@ struct SheepdogAIOCB {
 
 typedef struct BDRVSheepdogState {
     BlockDriverState *bs;
+    AioContext *aio_context;
 
     SheepdogInode inode;
 
@@ -496,7 +497,7 @@ static void sd_aio_cancel(BlockDriverAIOCB *blockacb)
             sd_finish_aiocb(acb);
             return;
         }
-        qemu_aio_wait();
+        aio_poll(s->aio_context, true);
     }
 }
 
@@ -578,6 +579,7 @@ static void restart_co_req(void *opaque)
 
 typedef struct SheepdogReqCo {
     int sockfd;
+    AioContext *aio_context;
     SheepdogReq *hdr;
     void *data;
     unsigned int *wlen;
@@ -598,14 +600,14 @@ static coroutine_fn void do_co_req(void *opaque)
     unsigned int *rlen = srco->rlen;
 
     co = qemu_coroutine_self();
-    qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, co);
+    aio_set_fd_handler(srco->aio_context, sockfd, NULL, restart_co_req, co);
 
     ret = send_co_req(sockfd, hdr, data, wlen);
     if (ret < 0) {
         goto out;
     }
 
-    qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, co);
+    aio_set_fd_handler(srco->aio_context, sockfd, restart_co_req, NULL, co);
 
     ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
     if (ret != sizeof(*hdr)) {
@@ -630,18 +632,19 @@ static coroutine_fn void do_co_req(void *opaque)
 out:
     /* there is at most one request for this sockfd, so it is safe to
      * set each handler to NULL. */
-    qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL);
+    aio_set_fd_handler(srco->aio_context, sockfd, NULL, NULL, NULL);
 
     srco->ret = ret;
     srco->finished = true;
 }
 
-static int do_req(int sockfd, SheepdogReq *hdr, void *data,
-                  unsigned int *wlen, unsigned int *rlen)
+static int do_req(int sockfd, AioContext *aio_context, SheepdogReq *hdr,
+                  void *data, unsigned int *wlen, unsigned int *rlen)
 {
     Coroutine *co;
     SheepdogReqCo srco = {
         .sockfd = sockfd,
+        .aio_context = aio_context,
         .hdr = hdr,
         .data = data,
         .wlen = wlen,
@@ -656,7 +659,7 @@ static int do_req(int sockfd, SheepdogReq *hdr, void *data,
         co = qemu_coroutine_create(do_co_req);
         qemu_coroutine_enter(co, &srco);
         while (!srco.finished) {
-            qemu_aio_wait();
+            aio_poll(aio_context, true);
         }
     }
 
@@ -709,7 +712,7 @@ static coroutine_fn void reconnect_to_sdog(void *opaque)
     BDRVSheepdogState *s = opaque;
     AIOReq *aio_req, *next;
 
-    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL);
+    aio_set_fd_handler(s->aio_context, s->fd, NULL, NULL, NULL);
     close(s->fd);
     s->fd = -1;
 
@@ -922,7 +925,7 @@ static int get_sheep_fd(BDRVSheepdogState *s, Error **errp)
         return fd;
     }
 
-    qemu_aio_set_fd_handler(fd, co_read_response, NULL, s);
+    aio_set_fd_handler(s->aio_context, fd, co_read_response, NULL, s);
     return fd;
 }
 
@@ -1092,7 +1095,7 @@ static int find_vdi_name(BDRVSheepdogState *s, const char *filename,
     hdr.snapid = snapid;
     hdr.flags = SD_FLAG_CMD_WRITE;
 
-    ret = do_req(fd, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
+    ret = do_req(fd, s->aio_context, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
     if (ret) {
         error_setg_errno(errp, -ret, "cannot get vdi info");
         goto out;
@@ -1173,7 +1176,8 @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
 
     qemu_co_mutex_lock(&s->lock);
     s->co_send = qemu_coroutine_self();
-    qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, s);
+    aio_set_fd_handler(s->aio_context, s->fd,
+                       co_read_response, co_write_request, s);
     socket_set_cork(s->fd, 1);
 
     /* send a header */
@@ -1191,12 +1195,13 @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
     }
 out:
     socket_set_cork(s->fd, 0);
-    qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, s);
+    aio_set_fd_handler(s->aio_context, s->fd, co_read_response, NULL, s);
     s->co_send = NULL;
     qemu_co_mutex_unlock(&s->lock);
 }
 
-static int read_write_object(int fd, char *buf, uint64_t oid, uint8_t copies,
+static int read_write_object(int fd, AioContext *aio_context, char *buf,
+                             uint64_t oid, uint8_t copies,
                              unsigned int datalen, uint64_t offset,
                              bool write, bool create, uint32_t cache_flags)
 {
@@ -1229,7 +1234,7 @@ static int read_write_object(int fd, char *buf, uint64_t oid, uint8_t copies,
     hdr.offset = offset;
     hdr.copies = copies;
 
-    ret = do_req(fd, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
+    ret = do_req(fd, aio_context, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
     if (ret) {
         error_report("failed to send a request to the sheep");
         return ret;
@@ -1244,19 +1249,23 @@ static int read_write_object(int fd, char *buf, uint64_t oid, uint8_t copies,
     }
 }
 
-static int read_object(int fd, char *buf, uint64_t oid, uint8_t copies,
+static int read_object(int fd, AioContext *aio_context, char *buf,
+                       uint64_t oid, uint8_t copies,
                        unsigned int datalen, uint64_t offset,
                        uint32_t cache_flags)
 {
-    return read_write_object(fd, buf, oid, copies, datalen, offset, false,
+    return read_write_object(fd, aio_context, buf, oid, copies,
+                             datalen, offset, false,
                              false, cache_flags);
 }
 
-static int write_object(int fd, char *buf, uint64_t oid, uint8_t copies,
+static int write_object(int fd, AioContext *aio_context, char *buf,
+                        uint64_t oid, uint8_t copies,
                         unsigned int datalen, uint64_t offset, bool create,
                         uint32_t cache_flags)
 {
-    return read_write_object(fd, buf, oid, copies, datalen, offset, true,
+    return read_write_object(fd, aio_context, buf, oid, copies,
+                             datalen, offset, true,
                              create, cache_flags);
 }
 
@@ -1284,7 +1293,7 @@ static int reload_inode(BDRVSheepdogState *s, uint32_t snapid, const char *tag)
         goto out;
     }
 
-    ret = read_object(fd, (char *)inode, vid_to_vdi_oid(vid),
+    ret = read_object(fd, s->aio_context, (char *)inode, vid_to_vdi_oid(vid),
                       s->inode.nr_copies, sizeof(*inode), 0, s->cache_flags);
     if (ret < 0) {
         goto out;
@@ -1359,6 +1368,22 @@ out:
     }
 }
 
+static void sd_detach_aio_context(BlockDriverState *bs)
+{
+    BDRVSheepdogState *s = bs->opaque;
+
+    aio_set_fd_handler(s->aio_context, s->fd, NULL, NULL, NULL);
+}
+
+static void sd_attach_aio_context(BlockDriverState *bs,
+                                  AioContext *new_context)
+{
+    BDRVSheepdogState *s = bs->opaque;
+
+    s->aio_context = new_context;
+    aio_set_fd_handler(new_context, s->fd, co_read_response, NULL, s);
+}
+
 /* TODO Convert to fine grained options */
 static QemuOptsList runtime_opts = {
     .name = "sheepdog",
@@ -1387,6 +1412,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags,
     const char *filename;
 
     s->bs = bs;
+    s->aio_context = bdrv_get_aio_context(bs);
 
     opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
     qemu_opts_absorb_qdict(opts, options, &local_err);
@@ -1448,8 +1474,8 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags,
     }
 
     buf = g_malloc(SD_INODE_SIZE);
-    ret = read_object(fd, buf, vid_to_vdi_oid(vid), 0, SD_INODE_SIZE, 0,
-                      s->cache_flags);
+    ret = read_object(fd, s->aio_context, buf, vid_to_vdi_oid(vid),
+                      0, SD_INODE_SIZE, 0, s->cache_flags);
 
     closesocket(fd);
 
@@ -1469,7 +1495,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags,
     g_free(buf);
     return 0;
 out:
-    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL);
+    aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd, NULL, NULL, NULL);
     if (s->fd >= 0) {
         closesocket(s->fd);
     }
@@ -1512,7 +1538,7 @@ static int do_sd_create(BDRVSheepdogState *s, uint32_t *vdi_id, int snapshot,
     hdr.copy_policy = s->inode.copy_policy;
     hdr.copies = s->inode.nr_copies;
 
-    ret = do_req(fd, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
+    ret = do_req(fd, s->aio_context, (SheepdogReq *)&hdr, buf, &wlen, &rlen);
 
     closesocket(fd);
 
@@ -1766,7 +1792,8 @@ static void sd_close(BlockDriverState *bs)
     hdr.data_length = wlen;
     hdr.flags = SD_FLAG_CMD_WRITE;
 
-    ret = do_req(fd, (SheepdogReq *)&hdr, s->name, &wlen, &rlen);
+    ret = do_req(fd, s->aio_context, (SheepdogReq *)&hdr,
+                 s->name, &wlen, &rlen);
 
     closesocket(fd);
 
@@ -1775,7 +1802,7 @@ static void sd_close(BlockDriverState *bs)
         error_report("%s, %s", sd_strerror(rsp->result), s->name);
     }
 
-    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL);
+    aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd, NULL, NULL, NULL);
     closesocket(s->fd);
     g_free(s->host_spec);
 }
@@ -1812,8 +1839,9 @@ static int sd_truncate(BlockDriverState *bs, int64_t offset)
     /* we don't need to update entire object */
     datalen = SD_INODE_SIZE - sizeof(s->inode.data_vdi_id);
     s->inode.vdi_size = offset;
-    ret = write_object(fd, (char *)&s->inode, vid_to_vdi_oid(s->inode.vdi_id),
-                       s->inode.nr_copies, datalen, 0, false, s->cache_flags);
+    ret = write_object(fd, s->aio_context, (char *)&s->inode,
+                       vid_to_vdi_oid(s->inode.vdi_id), s->inode.nr_copies,
+                       datalen, 0, false, s->cache_flags);
     close(fd);
 
     if (ret < 0) {
@@ -1882,7 +1910,8 @@ static bool sd_delete(BDRVSheepdogState *s)
         return false;
     }
 
-    ret = do_req(fd, (SheepdogReq *)&hdr, s->name, &wlen, &rlen);
+    ret = do_req(fd, s->aio_context, (SheepdogReq *)&hdr,
+                 s->name, &wlen, &rlen);
     closesocket(fd);
     if (ret) {
         return false;
@@ -1939,8 +1968,8 @@ static int sd_create_branch(BDRVSheepdogState *s)
         goto out;
     }
 
-    ret = read_object(fd, buf, vid_to_vdi_oid(vid), s->inode.nr_copies,
-                      SD_INODE_SIZE, 0, s->cache_flags);
+    ret = read_object(fd, s->aio_context, buf, vid_to_vdi_oid(vid),
+                      s->inode.nr_copies, SD_INODE_SIZE, 0, s->cache_flags);
 
     closesocket(fd);
 
@@ -2187,8 +2216,9 @@ static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
         goto cleanup;
     }
 
-    ret = write_object(fd, (char *)&s->inode, vid_to_vdi_oid(s->inode.vdi_id),
-                       s->inode.nr_copies, datalen, 0, false, s->cache_flags);
+    ret = write_object(fd, s->aio_context, (char *)&s->inode,
+                       vid_to_vdi_oid(s->inode.vdi_id), s->inode.nr_copies,
+                       datalen, 0, false, s->cache_flags);
     if (ret < 0) {
         error_report("failed to write snapshot's inode.");
         goto cleanup;
@@ -2203,8 +2233,9 @@ static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
         goto cleanup;
     }
 
-    ret = read_object(fd, (char *)inode, vid_to_vdi_oid(new_vid),
-                      s->inode.nr_copies, datalen, 0, s->cache_flags);
+    ret = read_object(fd, s->aio_context, (char *)inode,
+                      vid_to_vdi_oid(new_vid), s->inode.nr_copies, datalen, 0,
+                      s->cache_flags);
 
     if (ret < 0) {
         error_report("failed to read new inode info. %s", strerror(errno));
@@ -2311,7 +2342,8 @@ static int sd_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab)
     req.opcode = SD_OP_READ_VDIS;
     req.data_length = max;
 
-    ret = do_req(fd, (SheepdogReq *)&req, vdi_inuse, &wlen, &rlen);
+    ret = do_req(fd, s->aio_context, (SheepdogReq *)&req,
+                 vdi_inuse, &wlen, &rlen);
 
     closesocket(fd);
     if (ret) {
@@ -2338,7 +2370,8 @@ static int sd_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab)
         }
 
         /* we don't need to read entire object */
-        ret = read_object(fd, (char *)&inode, vid_to_vdi_oid(vid),
+        ret = read_object(fd, s->aio_context, (char *)&inode,
+                          vid_to_vdi_oid(vid),
                           0, SD_INODE_SIZE - sizeof(inode.data_vdi_id), 0,
                           s->cache_flags);
 
@@ -2403,11 +2436,11 @@ static int do_load_save_vmstate(BDRVSheepdogState *s, uint8_t *data,
 
         create = (offset == 0);
         if (load) {
-            ret = read_object(fd, (char *)data, vmstate_oid,
+            ret = read_object(fd, s->aio_context, (char *)data, vmstate_oid,
                               s->inode.nr_copies, data_len, offset,
                               s->cache_flags);
         } else {
-            ret = write_object(fd, (char *)data, vmstate_oid,
+            ret = write_object(fd, s->aio_context, (char *)data, vmstate_oid,
                                s->inode.nr_copies, data_len, offset, create,
                                s->cache_flags);
         }
@@ -2580,6 +2613,9 @@ static BlockDriver bdrv_sheepdog = {
     .bdrv_save_vmstate  = sd_save_vmstate,
     .bdrv_load_vmstate  = sd_load_vmstate,
 
+    .bdrv_detach_aio_context = sd_detach_aio_context,
+    .bdrv_attach_aio_context = sd_attach_aio_context,
+
     .create_options = sd_create_options,
 };
 
@@ -2610,6 +2646,9 @@ static BlockDriver bdrv_sheepdog_tcp = {
     .bdrv_save_vmstate  = sd_save_vmstate,
     .bdrv_load_vmstate  = sd_load_vmstate,
 
+    .bdrv_detach_aio_context = sd_detach_aio_context,
+    .bdrv_attach_aio_context = sd_attach_aio_context,
+
     .create_options = sd_create_options,
 };
 
@@ -2640,6 +2679,9 @@ static BlockDriver bdrv_sheepdog_unix = {
     .bdrv_save_vmstate  = sd_save_vmstate,
     .bdrv_load_vmstate  = sd_load_vmstate,
 
+    .bdrv_detach_aio_context = sd_detach_aio_context,
+    .bdrv_attach_aio_context = sd_attach_aio_context,
+
     .create_options = sd_create_options,
 };
 
-- 
1.9.3
