From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from eggs.gnu.org ([2001:4830:134:3::10]:60968) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1bUvGR-0000pC-Cf for qemu-devel@nongnu.org; Wed, 03 Aug 2016 08:27:14 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1bUvGQ-0006fQ-2o for qemu-devel@nongnu.org; Wed, 03 Aug 2016 08:27:11 -0400 Received: from szxga01-in.huawei.com ([58.251.152.64]:30692) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1bUvGP-0006dq-8L for qemu-devel@nongnu.org; Wed, 03 Aug 2016 08:27:10 -0400 From: zhanghailiang Date: Wed, 3 Aug 2016 20:26:12 +0800 Message-ID: <1470227172-13704-35-git-send-email-zhang.zhanghailiang@huawei.com> In-Reply-To: <1470227172-13704-1-git-send-email-zhang.zhanghailiang@huawei.com> References: <1470227172-13704-1-git-send-email-zhang.zhanghailiang@huawei.com> MIME-Version: 1.0 Content-Type: text/plain Subject: [Qemu-devel] [PATCH COLO-Frame v18 34/34] COLO: Add block replication into colo process List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: qemu-devel@nongnu.org Cc: amit.shah@redhat.com, quintela@redhat.com, dgilbert@redhat.com, peter.huangpeng@huawei.com, eddie.dong@intel.com, wency@cn.fujitsu.com, lizhijian@cn.fujitsu.com, zhangchen.fnst@cn.fujitsu.com, xiecl.fnst@cn.fujitsu.com, zhanghailiang , Stefan Hajnoczi , Kevin Wolf , Max Reitz Make sure the master starts block replication only after the slave's block replication has started. 
Signed-off-by: zhanghailiang Signed-off-by: Wen Congyang Signed-off-by: Li Zhijian Cc: Stefan Hajnoczi Cc: Kevin Wolf Cc: Max Reitz --- migration/colo.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ migration/migration.c | 6 +++++- 2 files changed, 54 insertions(+), 1 deletion(-) diff --git a/migration/colo.c b/migration/colo.c index ac16d61..52eb403 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -23,6 +23,8 @@ #include "net/net.h" #include "net/filter.h" #include "net/vhost_net.h" +#include "block/block.h" +#include "replication.h" static bool vmstate_loading; @@ -64,6 +66,7 @@ static void secondary_vm_do_failover(void) { int old_state; MigrationIncomingState *mis = migration_incoming_get_current(); + Error *local_err = NULL; /* Can not do failover during the process of VM's loading VMstate, Or * it will break the secondary VM. @@ -81,6 +84,11 @@ static void secondary_vm_do_failover(void) migrate_set_state(&mis->state, MIGRATION_STATUS_COLO, MIGRATION_STATUS_COMPLETED); + replication_stop_all(true, &local_err); + if (local_err) { + error_report_err(local_err); + } + if (!autostart) { error_report("\"-S\" qemu option will be ignored in secondary side"); /* recover runstate to normal migration finish state */ @@ -159,6 +167,11 @@ static void primary_vm_do_failover(void) error_report_err(local_err); } + replication_stop_all(true, &local_err); + if (local_err) { + error_report_err(local_err); + } + /* Notify COLO thread that failover work is finished */ qemu_sem_post(&s->colo_exit_sem); } @@ -317,6 +330,15 @@ static int colo_do_checkpoint_transaction(MigrationState *s, if (local_err) { goto out; } + + /* We call this API although this may do nothing on primary side. 
*/ + qemu_mutex_lock_iothread(); + replication_do_checkpoint_all(&local_err); + qemu_mutex_unlock_iothread(); + if (local_err) { + goto out; + } + colo_send_message(s->to_dst_file, COLO_MESSAGE_VMSTATE_SEND, &local_err); if (local_err) { goto out; @@ -483,6 +505,12 @@ static void colo_process_checkpoint(MigrationState *s) object_unref(OBJECT(bioc)); qemu_mutex_lock_iothread(); + replication_start_all(REPLICATION_MODE_PRIMARY, &local_err); + if (local_err) { + qemu_mutex_unlock_iothread(); + goto out; + } + vm_start(); qemu_mutex_unlock_iothread(); trace_colo_vm_state_change("stop", "run"); @@ -576,6 +604,7 @@ static void colo_wait_handle_message(QEMUFile *f, int *checkpoint_request, case COLO_MESSAGE_GUEST_SHUTDOWN: qemu_mutex_lock_iothread(); vm_stop_force_state(RUN_STATE_COLO); + replication_stop_all(false, NULL); qemu_system_shutdown_request_core(); qemu_mutex_unlock_iothread(); /* @@ -645,6 +674,14 @@ void *colo_process_incoming_thread(void *opaque) goto out; } + qemu_mutex_lock_iothread(); + bdrv_invalidate_cache_all(&local_err); + replication_start_all(REPLICATION_MODE_SECONDARY, &local_err); + qemu_mutex_unlock_iothread(); + if (local_err) { + goto out; + } + colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY, &local_err); if (local_err) { @@ -725,6 +762,18 @@ void *colo_process_incoming_thread(void *opaque) goto out; } + replication_get_error_all(&local_err); + if (local_err) { + qemu_mutex_unlock_iothread(); + goto out; + } + /* discard colo disk buffer */ + replication_do_checkpoint_all(&local_err); + if (local_err) { + qemu_mutex_unlock_iothread(); + goto out; + } + vmstate_loading = false; qemu_mutex_unlock_iothread(); diff --git a/migration/migration.c b/migration/migration.c index 3358980..5b5971f 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -1679,7 +1679,11 @@ static void migration_completion(MigrationState *s, int current_active_state, if (!ret) { ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); - if (ret >= 
0) { + /* + * Don't mark the image with BDRV_O_INACTIVE flag if + * we will go into COLO stage later. + */ + if (ret >= 0 && !migrate_colo_enabled()) { ret = bdrv_inactivate_all(); } if (ret >= 0) { -- 1.8.3.1