From: Juan Quintela <quintela@redhat.com>
To: qemu-devel@nongnu.org
Cc: dgilbert@redhat.com, lvivier@redhat.com, peterx@redhat.com
Subject: [Qemu-devel] [PATCH v10 19/24] migration: Create multifd channels
Date: Wed, 7 Mar 2018 12:00:05 +0100 [thread overview]
Message-ID: <20180307110010.2205-20-quintela@redhat.com> (raw)
In-Reply-To: <20180307110010.2205-1-quintela@redhat.com>
On both sides. We still don't transmit anything through them.
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
migration/ram.c | 52 ++++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 42 insertions(+), 10 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index b57d9fd667..7ef0c2b7e2 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -399,6 +399,7 @@ struct MultiFDSendParams {
uint8_t id;
char *name;
QemuThread thread;
+ QIOChannel *c;
QemuSemaphore sem;
QemuMutex mutex;
bool running;
@@ -455,6 +456,8 @@ int multifd_save_cleanup(Error **errp)
qemu_thread_join(&p->thread);
p->running = false;
}
+ socket_send_channel_destroy(p->c);
+ p->c = NULL;
qemu_mutex_destroy(&p->mutex);
qemu_sem_destroy(&p->sem);
g_free(p->name);
@@ -514,6 +517,27 @@ static void *multifd_send_thread(void *opaque)
return NULL;
}
+static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
+{
+ MultiFDSendParams *p = opaque;
+ QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
+ Error *local_err = NULL;
+
+ if (qio_task_propagate_error(task, &local_err)) {
+ if (multifd_save_cleanup(&local_err) != 0) {
+ migrate_set_error(migrate_get_current(), local_err);
+ }
+ } else {
+ p->c = QIO_CHANNEL(sioc);
+ qio_channel_set_delay(p->c, false);
+ p->running = true;
+ qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
+ QEMU_THREAD_JOINABLE);
+
+ atomic_inc(&multifd_send_state->count);
+ }
+}
+
int multifd_save_setup(void)
{
int thread_count;
@@ -536,11 +560,7 @@ int multifd_save_setup(void)
p->quit = false;
p->id = i;
p->name = g_strdup_printf("multifdsend_%d", i);
- p->running = true;
- qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
- QEMU_THREAD_JOINABLE);
-
- atomic_inc(&multifd_send_state->count);
+ socket_send_channel_create(multifd_new_send_channel_async, p);
}
return 0;
@@ -550,6 +570,7 @@ struct MultiFDRecvParams {
uint8_t id;
char *name;
QemuThread thread;
+ QIOChannel *c;
QemuSemaphore sem;
QemuMutex mutex;
bool running;
@@ -606,6 +627,8 @@ int multifd_load_cleanup(Error **errp)
qemu_thread_join(&p->thread);
p->running = false;
}
+ socket_recv_channel_unref(p->c);
+ p->c = NULL;
qemu_mutex_destroy(&p->mutex);
qemu_sem_destroy(&p->sem);
g_free(p->name);
@@ -688,10 +711,6 @@ int multifd_load_setup(void)
p->quit = false;
p->id = i;
p->name = g_strdup_printf("multifdrecv_%d", i);
- p->running = true;
- qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
- QEMU_THREAD_JOINABLE);
- atomic_inc(&multifd_recv_state->count);
}
return 0;
@@ -710,7 +729,20 @@ bool multifd_recv_all_channels_created(void)
void multifd_recv_new_channel(QIOChannel *ioc)
{
- /* nothing to do yet */
+ MultiFDRecvParams *p;
+ /* we need to invent channels id's until we transmit */
+ /* we will remove this on a later patch */
+ static int i = 0;
+
+ p = &multifd_recv_state->params[i];
+ i++;
+ p->c = ioc;
+ socket_recv_channel_ref(ioc);
+
+ p->running = true;
+ qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
+ QEMU_THREAD_JOINABLE);
+ atomic_inc(&multifd_recv_state->count);
}
/**
--
2.14.3
next prev parent reply other threads:[~2018-03-07 11:00 UTC|newest]
Thread overview: 48+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-03-07 10:59 [Qemu-devel] [RFC v10 00/24] Multifd Juan Quintela
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 01/24] tests: Add migration precopy test Juan Quintela
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 02/24] tests: Add migration xbzrle test Juan Quintela
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 03/24] migration: Create tcp_port parameter Juan Quintela
2018-03-07 11:38 ` Daniel P. Berrangé
2018-03-14 14:48 ` Juan Quintela
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 04/24] migration: Set the migration tcp port Juan Quintela
2018-03-07 11:40 ` Daniel P. Berrangé
2018-03-14 14:51 ` Juan Quintela
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 05/24] tests: Migration ppc now inlines its program Juan Quintela
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 06/24] tests: Add basic migration precopy tcp test Juan Quintela
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 07/24] [RFH] tests: Add migration compress threads tests Juan Quintela
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 08/24] migration: Add multifd test Juan Quintela
2018-03-13 16:53 ` Dr. David Alan Gilbert
2018-03-14 14:52 ` Juan Quintela
2018-03-14 14:53 ` Dr. David Alan Gilbert
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 09/24] migration: Set error state in case of error Juan Quintela
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 10/24] migration: In case of error just end the migration Juan Quintela
2018-03-07 11:52 ` Daniel P. Berrangé
2018-03-08 2:39 ` Eric Blake
2018-03-07 15:08 ` Dr. David Alan Gilbert
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 11/24] migration: terminate_* can be called for other threads Juan Quintela
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 12/24] migration: Reference counting recv channels correctly Juan Quintela
2018-03-07 11:56 ` Daniel P. Berrangé
2018-03-07 10:59 ` [Qemu-devel] [PATCH v10 13/24] migration: Introduce multifd_recv_new_channel() Juan Quintela
2018-03-07 11:00 ` [Qemu-devel] [PATCH v10 14/24] migration: Be sure all recv channels are created Juan Quintela
2018-03-07 11:00 ` [Qemu-devel] [PATCH v10 15/24] migration: Synchronize send threads Juan Quintela
2018-03-07 11:00 ` [Qemu-devel] [PATCH v10 16/24] migration: Synchronize recv threads Juan Quintela
2018-03-07 11:00 ` [Qemu-devel] [PATCH v10 17/24] migration: Export functions to create send channels Juan Quintela
2018-03-07 12:00 ` Daniel P. Berrangé
2018-03-07 11:00 ` [Qemu-devel] [PATCH v10 18/24] migration: Add multifd traces for start/end thread Juan Quintela
2018-03-07 12:01 ` Daniel P. Berrangé
2018-03-07 12:11 ` Daniel P. Berrangé
2018-03-07 11:00 ` Juan Quintela [this message]
2018-03-12 9:19 ` [Qemu-devel] [PATCH v10 19/24] migration: Create multifd channels Peter Xu
2018-03-15 12:57 ` Juan Quintela
2018-03-16 3:07 ` Peter Xu
2018-03-16 8:43 ` Juan Quintela
2018-03-07 11:00 ` [Qemu-devel] [PATCH v10 20/24] migration: Delay start of migration main routines Juan Quintela
2018-03-12 9:36 ` Peter Xu
2018-03-07 11:00 ` [Qemu-devel] [PATCH v10 21/24] migration: Transmit initial package through the multifd channels Juan Quintela
2018-03-07 12:07 ` Daniel P. Berrangé
2018-03-07 11:00 ` [Qemu-devel] [PATCH v10 22/24] migration: Create ram_multifd_page Juan Quintela
2018-03-07 12:12 ` Daniel P. Berrangé
2018-03-07 11:00 ` [Qemu-devel] [PATCH v10 23/24] migration: Create pages structure for reception Juan Quintela
2018-03-07 11:00 ` [Qemu-devel] [PATCH v10 24/24] [RFC] migration: Send pages through the multifd channels Juan Quintela
2018-03-07 12:14 ` Daniel P. Berrangé
2018-03-07 11:26 ` [Qemu-devel] [RFC v10 00/24] Multifd no-reply
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180307110010.2205-20-quintela@redhat.com \
--to=quintela@redhat.com \
--cc=dgilbert@redhat.com \
--cc=lvivier@redhat.com \
--cc=peterx@redhat.com \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).