From: Juan Quintela <quintela@redhat.com>
To: qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>, Peter Xu <peterx@redhat.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Markus Armbruster <armbru@redhat.com>,
	Juan Quintela <quintela@redhat.com>,
	Thomas Huth <thuth@redhat.com>,
	Li Zhijian <lizhijian@fujitsu.com>,
	Leonardo Bras <leobras@redhat.com>,
	Eric Blake <eblake@redhat.com>, Fabiano Rosas <farosas@suse.de>
Subject: [PULL 41/65] migration/rdma: Drop "@errp is clear" guards around error_setg()
Date: Wed, 11 Oct 2023 11:21:39 +0200	[thread overview]
Message-ID: <20231011092203.1266-42-quintela@redhat.com> (raw)
In-Reply-To: <20231011092203.1266-1-quintela@redhat.com>

From: Markus Armbruster <armbru@redhat.com>

These guards are all redundant now: error_setg() already does nothing when
@errp is null, and after the preceding cleanups *@errp is guaranteed to still
be clear at every one of these call sites.

Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Li Zhijian <lizhijian@fujitsu.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20230928132019.2544702-35-armbru@redhat.com>
---
 migration/rdma.c | 164 +++++++++++++++--------------------------------
 1 file changed, 51 insertions(+), 113 deletions(-)
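
The change at every call site has the same shape.  A minimal before/after
sketch of the pattern (the helper name, failing call and message below are
made up for illustration; the real call sites are in the hunks that follow,
and this assumes QEMU's qapi/error.h contract that error_setg() ignores a
null @errp and must only ever see a clear *@errp):

    #include "qapi/error.h"

    /* Before: each call site guarded error_setg() by hand. */
    static int helper_old(Error **errp)
    {
        if (some_rdma_call() < 0) {            /* hypothetical failing call */
            if (errp && !*errp) {
                error_setg(errp, "RDMA ERROR: some_rdma_call failed");
            }
            return -1;
        }
        return 0;
    }

    /* After: call error_setg() unconditionally.  The guard is redundant
     * because *@errp is guaranteed to be clear here, and error_setg()
     * already does nothing when @errp is null. */
    static int helper_new(Error **errp)
    {
        if (some_rdma_call() < 0) {
            error_setg(errp, "RDMA ERROR: some_rdma_call failed");
            return -1;
        }
        return 0;
    }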

diff --git a/migration/rdma.c b/migration/rdma.c
index 3fb899f963..fdb527af39 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -858,10 +858,8 @@ static int qemu_rdma_broken_ipv6_kernel(struct ibv_context *verbs, Error **errp)
 
             if (ibv_query_port(verbs, 1, &port_attr)) {
                 ibv_close_device(verbs);
-                if (errp && !*errp) {
-                    error_setg(errp,
-                               "RDMA ERROR: Could not query initial IB port");
-                }
+                error_setg(errp,
+                           "RDMA ERROR: Could not query initial IB port");
                 return -1;
             }
 
@@ -884,12 +882,10 @@ static int qemu_rdma_broken_ipv6_kernel(struct ibv_context *verbs, Error **errp)
                                 " migrate over the IB fabric until the kernel "
                                 " fixes the bug.\n");
             } else {
-                if (errp && !*errp) {
-                    error_setg(errp, "RDMA ERROR: "
-                               "You only have RoCE / iWARP devices in your systems"
-                               " and your management software has specified '[::]'"
-                               ", but IPv6 over RoCE / iWARP is not supported in Linux.");
-                }
+                error_setg(errp, "RDMA ERROR: "
+                           "You only have RoCE / iWARP devices in your systems"
+                           " and your management software has specified '[::]'"
+                           ", but IPv6 over RoCE / iWARP is not supported in Linux.");
                 return -1;
             }
         }
@@ -905,18 +901,14 @@ static int qemu_rdma_broken_ipv6_kernel(struct ibv_context *verbs, Error **errp)
 
     /* IB ports start with 1, not 0 */
     if (ibv_query_port(verbs, 1, &port_attr)) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: Could not query initial IB port");
-        }
+        error_setg(errp, "RDMA ERROR: Could not query initial IB port");
         return -1;
     }
 
     if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: "
-                       "Linux kernel's RoCE / iWARP does not support IPv6 "
-                       "(but patches on linux-rdma in progress)");
-        }
+        error_setg(errp, "RDMA ERROR: "
+                   "Linux kernel's RoCE / iWARP does not support IPv6 "
+                   "(but patches on linux-rdma in progress)");
         return -1;
     }
 
@@ -941,27 +933,21 @@ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
     struct rdma_addrinfo *e;
 
     if (rdma->host == NULL || !strcmp(rdma->host, "")) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: RDMA hostname has not been set");
-        }
+        error_setg(errp, "RDMA ERROR: RDMA hostname has not been set");
         return -1;
     }
 
     /* create CM channel */
     rdma->channel = rdma_create_event_channel();
     if (!rdma->channel) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: could not create CM channel");
-        }
+        error_setg(errp, "RDMA ERROR: could not create CM channel");
         return -1;
     }
 
     /* create CM id */
     ret = rdma_create_id(rdma->channel, &rdma->cm_id, NULL, RDMA_PS_TCP);
     if (ret < 0) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: could not create channel id");
-        }
+        error_setg(errp, "RDMA ERROR: could not create channel id");
         goto err_resolve_create_id;
     }
 
@@ -970,10 +956,8 @@ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
 
     ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
     if (ret) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: could not rdma_getaddrinfo address %s",
-                       rdma->host);
-        }
+        error_setg(errp, "RDMA ERROR: could not rdma_getaddrinfo address %s",
+                   rdma->host);
         goto err_resolve_get_addr;
     }
 
@@ -1015,18 +999,14 @@ route:
 
     ret = rdma_get_cm_event(rdma->channel, &cm_event);
     if (ret < 0) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: could not perform event_addr_resolved");
-        }
+        error_setg(errp, "RDMA ERROR: could not perform event_addr_resolved");
         goto err_resolve_get_addr;
     }
 
     if (cm_event->event != RDMA_CM_EVENT_ADDR_RESOLVED) {
-        if (errp && !*errp) {
-            error_setg(errp,
-                       "RDMA ERROR: result not equal to event_addr_resolved %s",
-                       rdma_event_str(cm_event->event));
-        }
+        error_setg(errp,
+                   "RDMA ERROR: result not equal to event_addr_resolved %s",
+                   rdma_event_str(cm_event->event));
         error_report("rdma_resolve_addr");
         rdma_ack_cm_event(cm_event);
         goto err_resolve_get_addr;
@@ -1036,25 +1016,19 @@ route:
     /* resolve route */
     ret = rdma_resolve_route(rdma->cm_id, RDMA_RESOLVE_TIMEOUT_MS);
     if (ret < 0) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: could not resolve rdma route");
-        }
+        error_setg(errp, "RDMA ERROR: could not resolve rdma route");
         goto err_resolve_get_addr;
     }
 
     ret = rdma_get_cm_event(rdma->channel, &cm_event);
     if (ret < 0) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: could not perform event_route_resolved");
-        }
+        error_setg(errp, "RDMA ERROR: could not perform event_route_resolved");
         goto err_resolve_get_addr;
     }
     if (cm_event->event != RDMA_CM_EVENT_ROUTE_RESOLVED) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: "
-                       "result not equal to event_route_resolved: %s",
-                       rdma_event_str(cm_event->event));
-        }
+        error_setg(errp, "RDMA ERROR: "
+                   "result not equal to event_route_resolved: %s",
+                   rdma_event_str(cm_event->event));
         rdma_ack_cm_event(cm_event);
         goto err_resolve_get_addr;
     }
@@ -2525,20 +2499,16 @@ static int qemu_rdma_source_init(RDMAContext *rdma, bool pin_all, Error **errp)
 
     ret = qemu_rdma_alloc_pd_cq(rdma);
     if (ret < 0) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: "
-                       "rdma migration: error allocating pd and cq! Your mlock()"
-                       " limits may be too low. Please check $ ulimit -a # and "
-                       "search for 'ulimit -l' in the output");
-        }
+        error_setg(errp, "RDMA ERROR: "
+                   "rdma migration: error allocating pd and cq! Your mlock()"
+                   " limits may be too low. Please check $ ulimit -a # and "
+                   "search for 'ulimit -l' in the output");
         goto err_rdma_source_init;
     }
 
     ret = qemu_rdma_alloc_qp(rdma);
     if (ret < 0) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: rdma migration: error allocating qp!");
-        }
+        error_setg(errp, "RDMA ERROR: rdma migration: error allocating qp!");
         goto err_rdma_source_init;
     }
 
@@ -2555,11 +2525,9 @@ static int qemu_rdma_source_init(RDMAContext *rdma, bool pin_all, Error **errp)
     for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
         ret = qemu_rdma_reg_control(rdma, idx);
         if (ret < 0) {
-            if (errp && !*errp) {
-                error_setg(errp,
-                           "RDMA ERROR: rdma migration: error registering %d control!",
-                           idx);
-            }
+            error_setg(errp,
+                       "RDMA ERROR: rdma migration: error registering %d control!",
+                       idx);
             goto err_rdma_source_init;
         }
     }
@@ -2587,29 +2555,21 @@ static int qemu_get_cm_event_timeout(RDMAContext *rdma,
     } while (ret < 0 && errno == EINTR);
 
     if (ret == 0) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: poll cm event timeout");
-        }
+        error_setg(errp, "RDMA ERROR: poll cm event timeout");
         return -1;
     } else if (ret < 0) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: failed to poll cm event, errno=%i",
-                       errno);
-        }
+        error_setg(errp, "RDMA ERROR: failed to poll cm event, errno=%i",
+                   errno);
         return -1;
     } else if (poll_fd.revents & POLLIN) {
         if (rdma_get_cm_event(rdma->channel, cm_event) < 0) {
-            if (errp && !*errp) {
-                error_setg(errp, "RDMA ERROR: failed to get cm event");
-            }
+            error_setg(errp, "RDMA ERROR: failed to get cm event");
             return -1;
         }
         return 0;
     } else {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: no POLLIN event, revent=%x",
-                       poll_fd.revents);
-        }
+        error_setg(errp, "RDMA ERROR: no POLLIN event, revent=%x",
+                   poll_fd.revents);
         return -1;
     }
 }
@@ -2642,18 +2602,14 @@ static int qemu_rdma_connect(RDMAContext *rdma, bool return_path,
 
     ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
     if (ret < 0) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: posting second control recv");
-        }
+        error_setg(errp, "RDMA ERROR: posting second control recv");
         goto err_rdma_source_connect;
     }
 
     ret = rdma_connect(rdma->cm_id, &conn_param);
     if (ret < 0) {
         perror("rdma_connect");
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: connecting to destination!");
-        }
+        error_setg(errp, "RDMA ERROR: connecting to destination!");
         goto err_rdma_source_connect;
     }
 
@@ -2662,9 +2618,7 @@ static int qemu_rdma_connect(RDMAContext *rdma, bool return_path,
     } else {
         ret = rdma_get_cm_event(rdma->channel, &cm_event);
         if (ret < 0) {
-            if (errp && !*errp) {
-                error_setg(errp, "RDMA ERROR: failed to get cm event");
-            }
+            error_setg(errp, "RDMA ERROR: failed to get cm event");
         }
     }
     if (ret < 0) {
@@ -2679,9 +2633,7 @@ static int qemu_rdma_connect(RDMAContext *rdma, bool return_path,
 
     if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
         error_report("rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect");
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: connecting to destination!");
-        }
+        error_setg(errp, "RDMA ERROR: connecting to destination!");
         rdma_ack_cm_event(cm_event);
         goto err_rdma_source_connect;
     }
@@ -2729,18 +2681,14 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
     }
 
     if (!rdma->host || !rdma->host[0]) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: RDMA host is not set!");
-        }
+        error_setg(errp, "RDMA ERROR: RDMA host is not set!");
         rdma->errored = true;
         return -1;
     }
     /* create CM channel */
     rdma->channel = rdma_create_event_channel();
     if (!rdma->channel) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: could not create rdma event channel");
-        }
+        error_setg(errp, "RDMA ERROR: could not create rdma event channel");
         rdma->errored = true;
         return -1;
     }
@@ -2748,9 +2696,7 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
     /* create CM id */
     ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP);
     if (ret < 0) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: could not create cm_id!");
-        }
+        error_setg(errp, "RDMA ERROR: could not create cm_id!");
         goto err_dest_init_create_listen_id;
     }
 
@@ -2759,19 +2705,15 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
 
     ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
     if (ret) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: could not rdma_getaddrinfo address %s",
-                       rdma->host);
-        }
+        error_setg(errp, "RDMA ERROR: could not rdma_getaddrinfo address %s",
+                   rdma->host);
         goto err_dest_init_bind_addr;
     }
 
     ret = rdma_set_option(listen_id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
                           &reuse, sizeof reuse);
     if (ret < 0) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: Error: could not set REUSEADDR option");
-        }
+        error_setg(errp, "RDMA ERROR: Error: could not set REUSEADDR option");
         goto err_dest_init_bind_addr;
     }
 
@@ -2855,10 +2797,8 @@ static RDMAContext *qemu_rdma_data_init(const char *host_port, Error **errp)
         rdma->host = g_strdup(addr->host);
         rdma->host_port = g_strdup(host_port);
     } else {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: bad RDMA migration address '%s'",
-                       host_port);
-        }
+        error_setg(errp, "RDMA ERROR: bad RDMA migration address '%s'",
+                   host_port);
         g_free(rdma);
         rdma = NULL;
     }
@@ -4232,9 +4172,7 @@ void rdma_start_incoming_migration(const char *host_port, Error **errp)
     ret = rdma_listen(rdma->listen_id, 5);
 
     if (ret < 0) {
-        if (errp && !*errp) {
-            error_setg(errp, "RDMA ERROR: listening on socket!");
-        }
+        error_setg(errp, "RDMA ERROR: listening on socket!");
         goto cleanup_rdma;
     }
 
-- 
2.41.0



