From: Markus Armbruster <armbru@redhat.com>
To: qemu-devel@nongnu.org
Cc: quintela@redhat.com, peterx@redhat.com, leobras@redhat.com,
farosas@suse.de, lizhijian@fujitsu.com, eblake@redhat.com
Subject: [PATCH v2 29/53] migration/rdma: Check negative error values the same way everywhere
Date: Thu, 28 Sep 2023 15:19:55 +0200 [thread overview]
Message-ID: <20230928132019.2544702-30-armbru@redhat.com> (raw)
In-Reply-To: <20230928132019.2544702-1-armbru@redhat.com>
When a function returns 0 on success and a negative value on error,
checking for non-zero suffices, but checking for negative is clearer.
So do that.
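
For illustration only (not part of the patch), a minimal sketch of the
preferred check, assuming a hypothetical do_foo() that returns 0 on
success and a negative value on error:

    int ret = do_foo();
    if (ret < 0) {      /* "< 0" spells out the error contract        */
        return -1;      /* "if (ret)" also works, but is less clear   */
    }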
Signed-off-by: Markus Armbruster <armbru@redhat.com>
---
migration/rdma.c | 82 ++++++++++++++++++++++++------------------------
1 file changed, 41 insertions(+), 41 deletions(-)
diff --git a/migration/rdma.c b/migration/rdma.c
index 2af9395696..c57692e5a3 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -953,7 +953,7 @@ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
/* create CM id */
ret = rdma_create_id(rdma->channel, &rdma->cm_id, NULL, RDMA_PS_TCP);
- if (ret) {
+ if (ret < 0) {
ERROR(errp, "could not create channel id");
goto err_resolve_create_id;
}
@@ -974,10 +974,10 @@ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
RDMA_RESOLVE_TIMEOUT_MS);
- if (!ret) {
+ if (ret >= 0) {
if (e->ai_family == AF_INET6) {
ret = qemu_rdma_broken_ipv6_kernel(rdma->cm_id->verbs, errp);
- if (ret) {
+ if (ret < 0) {
continue;
}
}
@@ -994,7 +994,7 @@ route:
qemu_rdma_dump_gid("source_resolve_addr", rdma->cm_id);
ret = rdma_get_cm_event(rdma->channel, &cm_event);
- if (ret) {
+ if (ret < 0) {
ERROR(errp, "could not perform event_addr_resolved");
goto err_resolve_get_addr;
}
@@ -1010,13 +1010,13 @@ route:
/* resolve route */
ret = rdma_resolve_route(rdma->cm_id, RDMA_RESOLVE_TIMEOUT_MS);
- if (ret) {
+ if (ret < 0) {
ERROR(errp, "could not resolve rdma route");
goto err_resolve_get_addr;
}
ret = rdma_get_cm_event(rdma->channel, &cm_event);
- if (ret) {
+ if (ret < 0) {
ERROR(errp, "could not perform event_route_resolved");
goto err_resolve_get_addr;
}
@@ -1124,7 +1124,7 @@ static int qemu_rdma_alloc_qp(RDMAContext *rdma)
attr.qp_type = IBV_QPT_RC;
ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr);
- if (ret) {
+ if (ret < 0) {
return -1;
}
@@ -1567,7 +1567,7 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma,
if (pfds[1].revents) {
ret = rdma_get_cm_event(rdma->channel, &cm_event);
- if (ret) {
+ if (ret < 0) {
error_report("failed to get cm event while wait "
"completion channel");
return -1;
@@ -1668,12 +1668,12 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma,
while (1) {
ret = qemu_rdma_wait_comp_channel(rdma, ch);
- if (ret) {
+ if (ret < 0) {
goto err_block_for_wrid;
}
ret = ibv_get_cq_event(ch, &cq, &cq_ctx);
- if (ret) {
+ if (ret < 0) {
/*
* FIXME perror() is problematic, because ibv_reg_mr() is
* not documented to set errno. Will go away later in
@@ -1909,7 +1909,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
*/
if (resp) {
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA);
- if (ret) {
+ if (ret < 0) {
error_report("rdma migration: error posting"
" extra control recv for anticipated result!");
return -1;
@@ -1920,7 +1920,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
* Post a WR to replace the one we just consumed for the READY message.
*/
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
- if (ret) {
+ if (ret < 0) {
error_report("rdma migration: error posting first control recv!");
return -1;
}
@@ -2007,7 +2007,7 @@ static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
* Post a new RECV work request to replace the one we just consumed.
*/
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
- if (ret) {
+ if (ret < 0) {
error_report("rdma migration: error posting second control recv!");
return -1;
}
@@ -2337,7 +2337,7 @@ static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
/* If we cannot merge it, we flush the current buffer first. */
if (!qemu_rdma_buffer_mergeable(rdma, current_addr, len)) {
ret = qemu_rdma_write_flush(f, rdma);
- if (ret) {
+ if (ret < 0) {
return -1;
}
rdma->current_length = 0;
@@ -2467,12 +2467,12 @@ static int qemu_rdma_source_init(RDMAContext *rdma, bool pin_all, Error **errp)
rdma->pin_all = pin_all;
ret = qemu_rdma_resolve_host(rdma, errp);
- if (ret) {
+ if (ret < 0) {
goto err_rdma_source_init;
}
ret = qemu_rdma_alloc_pd_cq(rdma);
- if (ret) {
+ if (ret < 0) {
ERROR(errp, "rdma migration: error allocating pd and cq! Your mlock()"
" limits may be too low. Please check $ ulimit -a # and "
"search for 'ulimit -l' in the output");
@@ -2480,7 +2480,7 @@ static int qemu_rdma_source_init(RDMAContext *rdma, bool pin_all, Error **errp)
}
ret = qemu_rdma_alloc_qp(rdma);
- if (ret) {
+ if (ret < 0) {
ERROR(errp, "rdma migration: error allocating qp!");
goto err_rdma_source_init;
}
@@ -2497,7 +2497,7 @@ static int qemu_rdma_source_init(RDMAContext *rdma, bool pin_all, Error **errp)
for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
ret = qemu_rdma_reg_control(rdma, idx);
- if (ret) {
+ if (ret < 0) {
ERROR(errp, "rdma migration: error registering %d control!",
idx);
goto err_rdma_source_init;
@@ -2571,13 +2571,13 @@ static int qemu_rdma_connect(RDMAContext *rdma, bool return_path,
caps_to_network(&cap);
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
- if (ret) {
+ if (ret < 0) {
ERROR(errp, "posting second control recv");
goto err_rdma_source_connect;
}
ret = rdma_connect(rdma->cm_id, &conn_param);
- if (ret) {
+ if (ret < 0) {
perror("rdma_connect");
ERROR(errp, "connecting to destination!");
goto err_rdma_source_connect;
@@ -2591,7 +2591,7 @@ static int qemu_rdma_connect(RDMAContext *rdma, bool return_path,
ERROR(errp, "failed to get cm event");
}
}
- if (ret) {
+ if (ret < 0) {
/*
* FIXME perror() is wrong, because
* qemu_get_cm_event_timeout() can fail without setting errno.
@@ -2664,7 +2664,7 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
/* create CM id */
ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP);
- if (ret) {
+ if (ret < 0) {
ERROR(errp, "could not create cm_id!");
goto err_dest_init_create_listen_id;
}
@@ -2680,7 +2680,7 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
ret = rdma_set_option(listen_id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
&reuse, sizeof reuse);
- if (ret) {
+ if (ret < 0) {
ERROR(errp, "Error: could not set REUSEADDR option");
goto err_dest_init_bind_addr;
}
@@ -2689,12 +2689,12 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
&((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
trace_qemu_rdma_dest_init_trying(rdma->host, ip);
ret = rdma_bind_addr(listen_id, e->ai_dst_addr);
- if (ret) {
+ if (ret < 0) {
continue;
}
if (e->ai_family == AF_INET6) {
ret = qemu_rdma_broken_ipv6_kernel(listen_id->verbs, errp);
- if (ret) {
+ if (ret < 0) {
continue;
}
}
@@ -3334,7 +3334,7 @@ static void rdma_cm_poll_handler(void *opaque)
MigrationIncomingState *mis = migration_incoming_get_current();
ret = rdma_get_cm_event(rdma->channel, &cm_event);
- if (ret) {
+ if (ret < 0) {
error_report("get_cm_event failed %d", errno);
return;
}
@@ -3374,7 +3374,7 @@ static int qemu_rdma_accept(RDMAContext *rdma)
int idx;
ret = rdma_get_cm_event(rdma->channel, &cm_event);
- if (ret) {
+ if (ret < 0) {
goto err_rdma_dest_wait;
}
@@ -3444,13 +3444,13 @@ static int qemu_rdma_accept(RDMAContext *rdma)
qemu_rdma_dump_id("dest_init", verbs);
ret = qemu_rdma_alloc_pd_cq(rdma);
- if (ret) {
+ if (ret < 0) {
error_report("rdma migration: error allocating pd and cq!");
goto err_rdma_dest_wait;
}
ret = qemu_rdma_alloc_qp(rdma);
- if (ret) {
+ if (ret < 0) {
error_report("rdma migration: error allocating qp!");
goto err_rdma_dest_wait;
}
@@ -3459,7 +3459,7 @@ static int qemu_rdma_accept(RDMAContext *rdma)
for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
ret = qemu_rdma_reg_control(rdma, idx);
- if (ret) {
+ if (ret < 0) {
error_report("rdma: error registering %d control", idx);
goto err_rdma_dest_wait;
}
@@ -3477,13 +3477,13 @@ static int qemu_rdma_accept(RDMAContext *rdma)
}
ret = rdma_accept(rdma->cm_id, &conn_param);
- if (ret) {
+ if (ret < 0) {
error_report("rdma_accept failed");
goto err_rdma_dest_wait;
}
ret = rdma_get_cm_event(rdma->channel, &cm_event);
- if (ret) {
+ if (ret < 0) {
error_report("rdma_accept get_cm_event failed");
goto err_rdma_dest_wait;
}
@@ -3498,7 +3498,7 @@ static int qemu_rdma_accept(RDMAContext *rdma)
rdma->connected = true;
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
- if (ret) {
+ if (ret < 0) {
error_report("rdma migration: error posting second control recv");
goto err_rdma_dest_wait;
}
@@ -3627,7 +3627,7 @@ static int qemu_rdma_registration_handle(QEMUFile *f)
if (rdma->pin_all) {
ret = qemu_rdma_reg_whole_ram_blocks(rdma);
- if (ret) {
+ if (ret < 0) {
error_report("rdma migration: error dest "
"registering ram blocks");
goto err;
@@ -4088,7 +4088,7 @@ static void rdma_accept_incoming_migration(void *opaque)
trace_qemu_rdma_accept_incoming_migration();
ret = qemu_rdma_accept(rdma);
- if (ret) {
+ if (ret < 0) {
fprintf(stderr, "RDMA ERROR: Migration initialization failed\n");
return;
}
@@ -4132,7 +4132,7 @@ void rdma_start_incoming_migration(const char *host_port, Error **errp)
}
ret = qemu_rdma_dest_init(rdma, errp);
- if (ret) {
+ if (ret < 0) {
goto err;
}
@@ -4140,7 +4140,7 @@ void rdma_start_incoming_migration(const char *host_port, Error **errp)
ret = rdma_listen(rdma->listen_id, 5);
- if (ret) {
+ if (ret < 0) {
ERROR(errp, "listening on socket!");
goto cleanup_rdma;
}
@@ -4182,14 +4182,14 @@ void rdma_start_outgoing_migration(void *opaque,
ret = qemu_rdma_source_init(rdma, migrate_rdma_pin_all(), errp);
- if (ret) {
+ if (ret < 0) {
goto err;
}
trace_rdma_start_outgoing_migration_after_rdma_source_init();
ret = qemu_rdma_connect(rdma, false, errp);
- if (ret) {
+ if (ret < 0) {
goto err;
}
@@ -4204,13 +4204,13 @@ void rdma_start_outgoing_migration(void *opaque,
ret = qemu_rdma_source_init(rdma_return_path,
migrate_rdma_pin_all(), errp);
- if (ret) {
+ if (ret < 0) {
goto return_path_err;
}
ret = qemu_rdma_connect(rdma_return_path, true, errp);
- if (ret) {
+ if (ret < 0) {
goto return_path_err;
}
--
2.41.0