From: Eric Blake <eblake@redhat.com>
To: qemu-devel@nongnu.org
Cc: qemu-block@nongnu.org, libguestfs@redhat.com,
	nbd@other.debian.org,
	Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Subject: [PATCH v2 06/15] nbd/server: Refactor to pass full request around
Date: Mon, 14 Nov 2022 16:48:39 -0600
Message-ID: <20221114224848.2186298-7-eblake@redhat.com>
In-Reply-To: <20221114224848.2186298-1-eblake@redhat.com>

Part of NBD's 64-bit headers extension involves passing the client's
requested offset back as part of the reply header (one reason for this
change: converting the absolute offsets stored in
NBD_REPLY_TYPE_OFFSET_DATA to offsets relative to the read buffer is
easier if the absolute offset of the buffer is also available).  This
patch refactors the reply stack to pass the full request around,
rather than just the handle, so that later patches can access
request->from when extended headers are active.  There are no
semantic changes yet.
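
For context only (not part of this patch): once the original request
offset is at hand, the conversion mentioned above is a single
subtraction.  A minimal sketch in C, using hypothetical names
(place_offset_data, payload, payload_len), of how a client might place
an NBD_REPLY_TYPE_OFFSET_DATA payload into its read buffer:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical helper, for illustration only: copy the payload of
     * an NBD_REPLY_TYPE_OFFSET_DATA chunk into the client's read
     * buffer.  chunk_offset is the absolute offset carried by the
     * chunk; request_from and request_len describe the original
     * NBD_CMD_READ. */
    static void place_offset_data(uint8_t *buf, uint64_t request_from,
                                  uint64_t request_len,
                                  uint64_t chunk_offset,
                                  const uint8_t *payload,
                                  size_t payload_len)
    {
        /* A well-formed chunk lies within the original request. */
        assert(chunk_offset >= request_from);
        assert(chunk_offset - request_from + payload_len <= request_len);

        /* Absolute-to-relative conversion is just a subtraction. */
        memcpy(buf + (chunk_offset - request_from), payload, payload_len);
    }

The server side mirrors this idea: with the whole NBDRequest visible
in the reply helpers, a later patch can emit request->from in extended
reply headers without threading yet another parameter through every
call site.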

Signed-off-by: Eric Blake <eblake@redhat.com>
---
 nbd/server.c | 109 ++++++++++++++++++++++++++-------------------------
 1 file changed, 55 insertions(+), 54 deletions(-)

diff --git a/nbd/server.c b/nbd/server.c
index ad5c2052b5..4d1400430b 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -1890,18 +1890,17 @@ static int coroutine_fn nbd_co_send_iov(NBDClient *client, struct iovec *iov,
 }

 static inline void set_be_simple_reply(NBDClient *client, struct iovec *iov,
-                                       uint64_t error, uint64_t handle)
+                                       uint64_t error, NBDRequest *request)
 {
     NBDSimpleReply *reply = iov->iov_base;

     iov->iov_len = sizeof(*reply);
     stl_be_p(&reply->magic, NBD_SIMPLE_REPLY_MAGIC);
     stl_be_p(&reply->error, error);
-    stq_be_p(&reply->handle, handle);
+    stq_be_p(&reply->handle, request->handle);
 }

-static int nbd_co_send_simple_reply(NBDClient *client,
-                                    uint64_t handle,
+static int nbd_co_send_simple_reply(NBDClient *client, NBDRequest *request,
                                     uint32_t error,
                                     void *data,
                                     size_t len,
@@ -1914,16 +1913,16 @@ static int nbd_co_send_simple_reply(NBDClient *client,
         {.iov_base = data, .iov_len = len}
     };

-    trace_nbd_co_send_simple_reply(handle, nbd_err, nbd_err_lookup(nbd_err),
-                                   len);
-    set_be_simple_reply(client, &iov[0], nbd_err, handle);
+    trace_nbd_co_send_simple_reply(request->handle, nbd_err,
+                                   nbd_err_lookup(nbd_err), len);
+    set_be_simple_reply(client, &iov[0], nbd_err, request);

     return nbd_co_send_iov(client, iov, len ? 2 : 1, errp);
 }

 static inline void set_be_chunk(NBDClient *client, struct iovec *iov,
                                 uint16_t flags, uint16_t type,
-                                uint64_t handle, uint32_t length)
+                                NBDRequest *request, uint32_t length)
 {
     NBDStructuredReplyChunk *chunk = iov->iov_base;

@@ -1931,12 +1930,12 @@ static inline void set_be_chunk(NBDClient *client, struct iovec *iov,
     stl_be_p(&chunk->magic, NBD_STRUCTURED_REPLY_MAGIC);
     stw_be_p(&chunk->flags, flags);
     stw_be_p(&chunk->type, type);
-    stq_be_p(&chunk->handle, handle);
+    stq_be_p(&chunk->handle, request->handle);
     stl_be_p(&chunk->length, length);
 }

 static int coroutine_fn nbd_co_send_structured_done(NBDClient *client,
-                                                    uint64_t handle,
+                                                    NBDRequest *request,
                                                     Error **errp)
 {
     NBDReply hdr;
@@ -1944,15 +1943,15 @@ static int coroutine_fn nbd_co_send_structured_done(NBDClient *client,
         {.iov_base = &hdr},
     };

-    trace_nbd_co_send_structured_done(handle);
+    trace_nbd_co_send_structured_done(request->handle);
     set_be_chunk(client, &iov[0], NBD_REPLY_FLAG_DONE,
-                 NBD_REPLY_TYPE_NONE, handle, 0);
+                 NBD_REPLY_TYPE_NONE, request, 0);

     return nbd_co_send_iov(client, iov, 1, errp);
 }

 static int coroutine_fn nbd_co_send_structured_read(NBDClient *client,
-                                                    uint64_t handle,
+                                                    NBDRequest *request,
                                                     uint64_t offset,
                                                     void *data,
                                                     size_t size,
@@ -1968,16 +1967,16 @@ static int coroutine_fn nbd_co_send_structured_read(NBDClient *client,
     };

     assert(size);
-    trace_nbd_co_send_structured_read(handle, offset, data, size);
+    trace_nbd_co_send_structured_read(request->handle, offset, data, size);
     set_be_chunk(client, &iov[0], final ? NBD_REPLY_FLAG_DONE : 0,
-                 NBD_REPLY_TYPE_OFFSET_DATA, handle, iov[1].iov_len + size);
+                 NBD_REPLY_TYPE_OFFSET_DATA, request, iov[1].iov_len + size);
     stq_be_p(&chunk.offset, offset);

     return nbd_co_send_iov(client, iov, 3, errp);
 }

 static int coroutine_fn nbd_co_send_structured_error(NBDClient *client,
-                                                     uint64_t handle,
+                                                     NBDRequest *request,
                                                      uint32_t error,
                                                      const char *msg,
                                                      Error **errp)
@@ -1992,10 +1991,11 @@ static int coroutine_fn nbd_co_send_structured_error(NBDClient *client,
     };

     assert(nbd_err);
-    trace_nbd_co_send_structured_error(handle, nbd_err,
+    trace_nbd_co_send_structured_error(request->handle, nbd_err,
                                        nbd_err_lookup(nbd_err), msg ? msg : "");
     set_be_chunk(client, &iov[0], NBD_REPLY_FLAG_DONE,
-                 NBD_REPLY_TYPE_ERROR, handle, iov[1].iov_len + iov[2].iov_len);
+                 NBD_REPLY_TYPE_ERROR, request,
+                 iov[1].iov_len + iov[2].iov_len);
     stl_be_p(&chunk.error, nbd_err);
     stw_be_p(&chunk.message_length, iov[2].iov_len);

@@ -2007,7 +2007,7 @@ static int coroutine_fn nbd_co_send_structured_error(NBDClient *client,
  * reported to the client, at which point this function succeeds.
  */
 static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
-                                                uint64_t handle,
+                                                NBDRequest *request,
                                                 uint64_t offset,
                                                 uint8_t *data,
                                                 size_t size,
@@ -2029,7 +2029,7 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
             char *msg = g_strdup_printf("unable to check for holes: %s",
                                         strerror(-status));

-            ret = nbd_co_send_structured_error(client, handle, -status, msg,
+            ret = nbd_co_send_structured_error(client, request, -status, msg,
                                                errp);
             g_free(msg);
             return ret;
@@ -2044,12 +2044,12 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
                 {.iov_base = &chunk, .iov_len = sizeof(chunk)},
             };

-            trace_nbd_co_send_structured_read_hole(handle, offset + progress,
+            trace_nbd_co_send_structured_read_hole(request->handle,
+                                                   offset + progress,
                                                    pnum);
             set_be_chunk(client, &iov[0],
                          final ? NBD_REPLY_FLAG_DONE : 0,
-                         NBD_REPLY_TYPE_OFFSET_HOLE,
-                         handle, iov[1].iov_len);
+                         NBD_REPLY_TYPE_OFFSET_HOLE, request, iov[1].iov_len);
             stq_be_p(&chunk.offset, offset + progress);
             stl_be_p(&chunk.length, pnum);
             ret = nbd_co_send_iov(client, iov, 2, errp);
@@ -2060,7 +2060,8 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
                 error_setg_errno(errp, -ret, "reading from file failed");
                 break;
             }
-            ret = nbd_co_send_structured_read(client, handle, offset + progress,
+            ret = nbd_co_send_structured_read(client, request,
+                                              offset + progress,
                                               data + progress, pnum, final,
                                               errp);
         }
@@ -2212,7 +2213,7 @@ static int blockalloc_to_extents(BlockDriverState *bs, uint64_t offset,
  * @ea is converted to BE by the function
  * @last controls whether NBD_REPLY_FLAG_DONE is sent.
  */
-static int nbd_co_send_extents(NBDClient *client, uint64_t handle,
+static int nbd_co_send_extents(NBDClient *client, NBDRequest *request,
                                NBDExtentArray *ea,
                                bool last, uint32_t context_id, Error **errp)
 {
@@ -2226,18 +2227,18 @@ static int nbd_co_send_extents(NBDClient *client, uint64_t handle,

     nbd_extent_array_convert_to_be(ea);

-    trace_nbd_co_send_extents(handle, ea->count, context_id, ea->total_length,
-                              last);
+    trace_nbd_co_send_extents(request->handle, ea->count, context_id,
+                              ea->total_length, last);
     set_be_chunk(client, &iov[0], last ? NBD_REPLY_FLAG_DONE : 0,
                  NBD_REPLY_TYPE_BLOCK_STATUS,
-                 handle, iov[1].iov_len + iov[2].iov_len);
+                 request, iov[1].iov_len + iov[2].iov_len);
     stl_be_p(&chunk.context_id, context_id);

     return nbd_co_send_iov(client, iov, 3, errp);
 }

 /* Get block status from the exported device and send it to the client */
-static int nbd_co_send_block_status(NBDClient *client, uint64_t handle,
+static int nbd_co_send_block_status(NBDClient *client, NBDRequest *request,
                                     BlockDriverState *bs, uint64_t offset,
                                     uint32_t length, bool dont_fragment,
                                     bool last, uint32_t context_id,
@@ -2254,10 +2255,10 @@ static int nbd_co_send_block_status(NBDClient *client, uint64_t handle,
     }
     if (ret < 0) {
         return nbd_co_send_structured_error(
-                client, handle, -ret, "can't get block status", errp);
+                client, request, -ret, "can't get block status", errp);
     }

-    return nbd_co_send_extents(client, handle, ea, last, context_id, errp);
+    return nbd_co_send_extents(client, request, ea, last, context_id, errp);
 }

 /* Populate @ea from a dirty bitmap. */
@@ -2292,7 +2293,7 @@ static void bitmap_to_extents(BdrvDirtyBitmap *bitmap,
     bdrv_dirty_bitmap_unlock(bitmap);
 }

-static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle,
+static int nbd_co_send_bitmap(NBDClient *client, NBDRequest *request,
                               BdrvDirtyBitmap *bitmap, uint64_t offset,
                               uint32_t length, bool dont_fragment, bool last,
                               uint32_t context_id, Error **errp)
@@ -2302,7 +2303,7 @@ static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle,

     bitmap_to_extents(bitmap, offset, length, ea);

-    return nbd_co_send_extents(client, handle, ea, last, context_id, errp);
+    return nbd_co_send_extents(client, request, ea, last, context_id, errp);
 }

 /* nbd_co_receive_request
@@ -2440,16 +2441,16 @@ static int nbd_co_receive_request(NBDRequestData *req, NBDRequest *request,
  * Returns 0 if connection is still live, -errno on failure to talk to client
  */
 static coroutine_fn int nbd_send_generic_reply(NBDClient *client,
-                                               uint64_t handle,
+                                               NBDRequest *request,
                                                int ret,
                                                const char *error_msg,
                                                Error **errp)
 {
     if (client->structured_reply && ret < 0) {
-        return nbd_co_send_structured_error(client, handle, -ret, error_msg,
+        return nbd_co_send_structured_error(client, request, -ret, error_msg,
                                             errp);
     } else {
-        return nbd_co_send_simple_reply(client, handle, ret < 0 ? -ret : 0,
+        return nbd_co_send_simple_reply(client, request, ret < 0 ? -ret : 0,
                                         NULL, 0, errp);
     }
 }
@@ -2470,7 +2471,7 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
     if (request->flags & NBD_CMD_FLAG_FUA) {
         ret = blk_co_flush(exp->common.blk);
         if (ret < 0) {
-            return nbd_send_generic_reply(client, request->handle, ret,
+            return nbd_send_generic_reply(client, request, ret,
                                           "flush failed", errp);
         }
     }
@@ -2478,26 +2479,26 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
     if (client->structured_reply && !(request->flags & NBD_CMD_FLAG_DF) &&
         request->len)
     {
-        return nbd_co_send_sparse_read(client, request->handle, request->from,
+        return nbd_co_send_sparse_read(client, request, request->from,
                                        data, request->len, errp);
     }

     ret = blk_pread(exp->common.blk, request->from, request->len, data, 0);
     if (ret < 0) {
-        return nbd_send_generic_reply(client, request->handle, ret,
+        return nbd_send_generic_reply(client, request, ret,
                                       "reading from file failed", errp);
     }

     if (client->structured_reply) {
         if (request->len) {
-            return nbd_co_send_structured_read(client, request->handle,
+            return nbd_co_send_structured_read(client, request,
                                                request->from, data,
                                                request->len, true, errp);
         } else {
-            return nbd_co_send_structured_done(client, request->handle, errp);
+            return nbd_co_send_structured_done(client, request, errp);
         }
     } else {
-        return nbd_co_send_simple_reply(client, request->handle, 0,
+        return nbd_co_send_simple_reply(client, request, 0,
                                         data, request->len, errp);
     }
 }
@@ -2521,7 +2522,7 @@ static coroutine_fn int nbd_do_cmd_cache(NBDClient *client, NBDRequest *request,
     ret = blk_co_preadv(exp->common.blk, request->from, request->len,
                         NULL, BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH);

-    return nbd_send_generic_reply(client, request->handle, ret,
+    return nbd_send_generic_reply(client, request, ret,
                                   "caching data failed", errp);
 }

@@ -2553,7 +2554,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
         assert(request->len <= NBD_MAX_BUFFER_SIZE);
         ret = blk_pwrite(exp->common.blk, request->from, request->len, data,
                          flags);
-        return nbd_send_generic_reply(client, request->handle, ret,
+        return nbd_send_generic_reply(client, request, ret,
                                       "writing to file failed", errp);

     case NBD_CMD_WRITE_ZEROES:
@@ -2569,7 +2570,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
         }
         ret = blk_pwrite_zeroes(exp->common.blk, request->from, request->len,
                                 flags);
-        return nbd_send_generic_reply(client, request->handle, ret,
+        return nbd_send_generic_reply(client, request, ret,
                                       "writing to file failed", errp);

     case NBD_CMD_DISC:
@@ -2578,7 +2579,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,

     case NBD_CMD_FLUSH:
         ret = blk_co_flush(exp->common.blk);
-        return nbd_send_generic_reply(client, request->handle, ret,
+        return nbd_send_generic_reply(client, request, ret,
                                       "flush failed", errp);

     case NBD_CMD_TRIM:
@@ -2586,12 +2587,12 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
         if (ret >= 0 && request->flags & NBD_CMD_FLAG_FUA) {
             ret = blk_co_flush(exp->common.blk);
         }
-        return nbd_send_generic_reply(client, request->handle, ret,
+        return nbd_send_generic_reply(client, request, ret,
                                       "discard failed", errp);

     case NBD_CMD_BLOCK_STATUS:
         if (!request->len) {
-            return nbd_send_generic_reply(client, request->handle, -EINVAL,
+            return nbd_send_generic_reply(client, request, -EINVAL,
                                           "need non-zero length", errp);
         }
         assert(request->len <= UINT32_MAX);
@@ -2600,7 +2601,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
             int contexts_remaining = client->export_meta.count;

             if (client->export_meta.base_allocation) {
-                ret = nbd_co_send_block_status(client, request->handle,
+                ret = nbd_co_send_block_status(client, request,
                                                blk_bs(exp->common.blk),
                                                request->from,
                                                request->len, dont_fragment,
@@ -2613,7 +2614,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
             }

             if (client->export_meta.allocation_depth) {
-                ret = nbd_co_send_block_status(client, request->handle,
+                ret = nbd_co_send_block_status(client, request,
                                                blk_bs(exp->common.blk),
                                                request->from, request->len,
                                                dont_fragment,
@@ -2629,7 +2630,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
                 if (!client->export_meta.bitmaps[i]) {
                     continue;
                 }
-                ret = nbd_co_send_bitmap(client, request->handle,
+                ret = nbd_co_send_bitmap(client, request,
                                          client->exp->export_bitmaps[i],
                                          request->from, request->len,
                                          dont_fragment, !--contexts_remaining,
@@ -2643,7 +2644,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,

             return 0;
         } else {
-            return nbd_send_generic_reply(client, request->handle, -EINVAL,
+            return nbd_send_generic_reply(client, request, -EINVAL,
                                           "CMD_BLOCK_STATUS not negotiated",
                                           errp);
         }
@@ -2651,7 +2652,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
     default:
         msg = g_strdup_printf("invalid request type (%" PRIu32 ") received",
                               request->type);
-        ret = nbd_send_generic_reply(client, request->handle, -EINVAL, msg,
+        ret = nbd_send_generic_reply(client, request, -EINVAL, msg,
                                      errp);
         g_free(msg);
         return ret;
@@ -2712,7 +2713,7 @@ static coroutine_fn void nbd_trip(void *opaque)
         Error *export_err = local_err;

         local_err = NULL;
-        ret = nbd_send_generic_reply(client, request.handle, -EINVAL,
+        ret = nbd_send_generic_reply(client, &request, -EINVAL,
                                      error_get_pretty(export_err), &local_err);
         error_free(export_err);
     } else {
-- 
2.38.1



Thread overview: 65+ messages
2022-11-14 22:41 [cross-project PATCH v2] NBD 64-bit extensions Eric Blake
2022-11-14 22:46 ` [PATCH v2 0/6] NBD spec changes for " Eric Blake
2022-11-14 22:46   ` [PATCH v2 1/6] spec: Recommend cap on NBD_REPLY_TYPE_BLOCK_STATUS length Eric Blake
2022-12-16 19:32     ` Vladimir Sementsov-Ogievskiy
2023-03-03 22:17       ` Eric Blake
2023-03-05  8:41         ` Wouter Verhelst
2023-03-06  8:48           ` [Libguestfs] " Laszlo Ersek
2023-03-06 13:48           ` Nir Soffer
2022-11-14 22:46   ` [PATCH v2 2/6] spec: Tweak description of maximum block size Eric Blake
2022-12-16 20:22     ` Vladimir Sementsov-Ogievskiy
2023-03-03 22:20       ` Eric Blake
2023-02-21 15:21     ` Wouter Verhelst
2023-03-03 22:26       ` Eric Blake
2023-03-05  8:45         ` Wouter Verhelst
2022-11-14 22:46   ` [PATCH v2 3/6] spec: Add NBD_OPT_EXTENDED_HEADERS Eric Blake
2022-12-19 18:26     ` Vladimir Sementsov-Ogievskiy
2023-02-22  9:49     ` Wouter Verhelst
2023-03-03 22:36       ` Eric Blake
2023-03-05  8:49         ` Wouter Verhelst
2022-11-14 22:46   ` [PATCH v2 4/6] spec: Allow 64-bit block status results Eric Blake
2022-11-14 22:46   ` [PATCH v2 5/6] spec: Introduce NBD_FLAG_BLOCK_STATUS_PAYLOAD Eric Blake
2023-02-22 10:05     ` Wouter Verhelst
2023-03-03 22:40       ` Eric Blake
2023-03-05  8:50         ` Wouter Verhelst
2022-11-14 22:46   ` [PATCH v2 6/6] RFC: spec: Introduce NBD_REPLY_TYPE_OFFSET_HOLE_EXT Eric Blake
2022-11-14 22:48 ` [PATCH v2 00/15] qemu patches for 64-bit NBD extensions Eric Blake
2022-11-14 22:48   ` [PATCH v2 01/15] nbd/client: Add safety check on chunk payload length Eric Blake
2022-11-14 22:48   ` [PATCH v2 02/15] nbd/server: Prepare for alternate-size headers Eric Blake
2022-11-14 22:48   ` [PATCH v2 03/15] nbd: Prepare for 64-bit request effect lengths Eric Blake
2022-11-14 22:48   ` [PATCH v2 04/15] nbd: Add types for extended headers Eric Blake
2022-11-14 22:48   ` [PATCH v2 05/15] nbd/server: Refactor handling of request payload Eric Blake
2022-11-14 22:48   ` Eric Blake [this message]
2022-11-14 22:48   ` [PATCH v2 07/15] nbd/server: Initial support for extended headers Eric Blake
2022-11-14 22:48   ` [PATCH v2 08/15] nbd/server: Support 64-bit block status Eric Blake
2022-11-14 22:48   ` [PATCH v2 09/15] nbd/client: Initial support for extended headers Eric Blake
2022-11-14 22:48   ` [PATCH v2 10/15] nbd/client: Accept 64-bit block status chunks Eric Blake
2022-11-14 22:48   ` [PATCH v2 11/15] nbd/client: Request extended headers during negotiation Eric Blake
2022-11-14 22:48   ` [PATCH v2 12/15] nbd/server: Prepare for per-request filtering of BLOCK_STATUS Eric Blake
2022-11-14 22:48   ` [PATCH v2 13/15] nbd/server: Add FLAG_PAYLOAD support to CMD_BLOCK_STATUS Eric Blake
2022-11-14 22:48   ` [PATCH v2 14/15] RFC: nbd/client: Accept 64-bit hole chunks Eric Blake
2022-11-14 22:48   ` [PATCH v2 15/15] RFC: nbd/server: Send 64-bit hole chunk Eric Blake
2022-11-14 22:51 ` [libnbd PATCH v2 00/23] libnbd 64-bit NBD extensions Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 01/23] block_status: Refactor array storage Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 02/23] internal: Refactor layout of replies in sbuf Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 03/23] protocol: Add definitions for extended headers Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 04/23] states: Prepare to send 64-bit requests Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 05/23] states: Prepare to receive 64-bit replies Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 06/23] states: Break deadlock if server goofs on extended replies Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 07/23] generator: Add struct nbd_extent in prep for 64-bit extents Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 08/23] block_status: Track 64-bit extents internally Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 09/23] block_status: Accept 64-bit extents during block status Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 10/23] api: Add [aio_]nbd_block_status_64 Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 11/23] api: Add several functions for controlling extended headers Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 12/23] copy: Update nbdcopy to use 64-bit block status Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 13/23] dump: Update nbddump " Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 14/23] info: Expose extended-headers support through nbdinfo Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 15/23] info: Update nbdinfo --map to use 64-bit block status Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 16/23] examples: Update copy-libev " Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 17/23] ocaml: Add example for 64-bit extents Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 18/23] generator: Actually request extended headers Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 19/23] api: Add nbd_[aio_]opt_extended_headers() Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 20/23] interop: Add test of 64-bit block status Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 21/23] api: Add nbd_can_block_status_payload() Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 22/23] api: Add nbd_[aio_]block_status_filter() Eric Blake
2022-11-14 22:51   ` [libnbd PATCH v2 23/23] RFC: pread: Accept 64-bit holes Eric Blake
