References: <1448357149-17572-1-git-send-email-zhang.zhanghailiang@huawei.com> <1448357149-17572-16-git-send-email-zhang.zhanghailiang@huawei.com> <20151201190251.GD31209@work-vm>
From: Hailiang Zhang
Message-ID: <565FFC68.901@huawei.com>
Date: Thu, 3 Dec 2015 16:25:12 +0800
In-Reply-To: <20151201190251.GD31209@work-vm>
Subject: Re: [Qemu-devel] [PATCH COLO-Frame v11 15/39] COLO: Load PVM's dirty pages into SVM's RAM cache temporarily
To: "Dr. David Alan Gilbert"
Cc: lizhijian@cn.fujitsu.com, quintela@redhat.com, yunhong.jiang@intel.com, eddie.dong@intel.com, peter.huangpeng@huawei.com, qemu-devel@nongnu.org, arei.gonglei@huawei.com, stefanha@redhat.com, amit.shah@redhat.com, hongyang.yang@easystack.cn

On 2015/12/2 3:02, Dr. David Alan Gilbert wrote:
> * zhanghailiang (zhang.zhanghailiang@huawei.com) wrote:
>> We should not load PVM's state directly into SVM, because errors may
>> happen while SVM is receiving the data, which would break SVM.
>>
>> We need to make sure that all the data has been received before we load
>> the state into SVM. We use extra memory to cache this data (PVM's RAM).
>> The RAM cache on the secondary side is initially the same as SVM/PVM's
>> memory. During the checkpoint process we first cache PVM's dirty pages
>> in this RAM cache, so the RAM cache is always the same as PVM's memory
>> at every checkpoint; we then flush this cached RAM into SVM after we
>> have received all of PVM's state.
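
To give the bigger picture for anyone reading this patch on its own: the flush of this cache back into the SVM's memory is not part of this patch, it happens later in the series, once a whole checkpoint has been received. Roughly, the idea is something like the sketch below (simplified here; the real flush only copies the pages recorded as dirty, which the separate dirty-page-recording patch takes care of, and the exact function name and details are in the later patch):

    /*
     * Sketch only: copy the cached PVM RAM back into the SVM's memory
     * once a complete checkpoint has been received.  The real code walks
     * the dirty bitmap instead of copying every block in full.
     */
    static void colo_flush_ram_cache(void)
    {
        RAMBlock *block;

        rcu_read_lock();
        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
            memcpy(block->host, block->colo_cache, block->used_length);
        }
        rcu_read_unlock();
    }
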
>>
>> Signed-off-by: zhanghailiang
>> Signed-off-by: Li Zhijian
>> Signed-off-by: Gonglei
>> ---
>> v11:
>> - Rename 'host_cache' to 'colo_cache' (Dave's suggestion)
>> v10:
>> - Split the process of dirty pages recording into a new patch
>> ---
>>  include/exec/ram_addr.h       |  1 +
>>  include/migration/migration.h |  4 +++
>>  migration/colo.c              | 10 +++++++
>>  migration/ram.c               | 69 ++++++++++++++++++++++++++++++++++++++++++-
>>  4 files changed, 83 insertions(+), 1 deletion(-)
>>
>> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
>> index 7115154..bb44f66 100644
>> --- a/include/exec/ram_addr.h
>> +++ b/include/exec/ram_addr.h
>> @@ -26,6 +26,7 @@ struct RAMBlock {
>>      struct rcu_head rcu;
>>      struct MemoryRegion *mr;
>>      uint8_t *host;
>> +    uint8_t *colo_cache; /* For colo, VM's ram cache */
>>      ram_addr_t offset;
>>      ram_addr_t used_length;
>>      ram_addr_t max_length;
>> diff --git a/include/migration/migration.h b/include/migration/migration.h
>> index ba5bcec..e41372d 100644
>> --- a/include/migration/migration.h
>> +++ b/include/migration/migration.h
>> @@ -332,4 +332,8 @@ int ram_save_queue_pages(MigrationState *ms, const char *rbname,
>>  PostcopyState postcopy_state_get(void);
>>  /* Set the state and return the old state */
>>  PostcopyState postcopy_state_set(PostcopyState new_state);
>> +
>> +/* ram cache */
>> +int colo_init_ram_cache(void);
>> +void colo_release_ram_cache(void);
>>  #endif
>> diff --git a/migration/colo.c b/migration/colo.c
>> index 012d8e5..6e933fa 100644
>> --- a/migration/colo.c
>> +++ b/migration/colo.c
>> @@ -304,6 +304,12 @@ void *colo_process_incoming_thread(void *opaque)
>>      qemu_set_block(qemu_get_fd(mis->from_src_file));
>>
>>
>> +    ret = colo_init_ram_cache();
>> +    if (ret < 0) {
>> +        error_report("Failed to initialize ram cache");
>> +        goto out;
>> +    }
>> +
>>      ret = colo_ctl_put(mis->to_src_file, COLO_COMMAND_CHECKPOINT_READY, 0);
>>      if (ret < 0) {
>>          goto out;
>> @@ -353,6 +359,10 @@ out:
>>                       strerror(-ret));
>>      }
>>
>> +    qemu_mutex_lock_iothread();
>> +    colo_release_ram_cache();
>> +    qemu_mutex_unlock_iothread();
>> +
>>      if (mis->to_src_file) {
>>          qemu_fclose(mis->to_src_file);
>>      }
>> diff --git a/migration/ram.c b/migration/ram.c
>> index a161620..9d946a1 100644
>> --- a/migration/ram.c
>> +++ b/migration/ram.c
>> @@ -223,6 +223,7 @@ static RAMBlock *last_sent_block;
>>  static ram_addr_t last_offset;
>>  static QemuMutex migration_bitmap_mutex;
>>  static uint64_t migration_dirty_pages;
>> +static bool ram_cache_enable;
>>  static uint32_t last_version;
>>  static bool ram_bulk_stage;
>>
>> @@ -2175,6 +2176,16 @@ static inline void *host_from_ram_block_offset(RAMBlock *block,
>>      return block->host + offset;
>>  }
>>
>> +static inline void *colo_cache_from_block_offset(RAMBlock *block,
>> +                                                 ram_addr_t offset)
>> +{
>> +    if (!block) {
>> +        return NULL;
>> +    }
>> +
>> +    return block->colo_cache + offset;
>> +}
>> +
>>  /*
>>   * If a page (or a whole RDMA chunk) has been
>>   * determined to be zero, then zap it.
>> @@ -2454,7 +2465,12 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
>>                       RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
>>              RAMBlock *block = ram_block_from_stream(f, addr, flags);
>>
>> -            host = host_from_ram_block_offset(block, addr);
>> +            /* After going into COLO, we should load the Page into colo_cache */
>> +            if (ram_cache_enable) {
>> +                host = colo_cache_from_block_offset(block, addr);
>> +            } else {
>> +                host = host_from_ram_block_offset(block, addr);
>> +            }
>>              if (!host) {
>>                  error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
>>                  ret = -EINVAL;
>> @@ -2550,6 +2566,57 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
>>      return ret;
>>  }
>>
>> +/*
>> + * colo cache: this is for secondary VM, we cache the whole
>> + * memory of the secondary VM, it will be called after first migration.
>> + */
>> +int colo_init_ram_cache(void)
>> +{
>> +    RAMBlock *block;
>> +
>> +    rcu_read_lock();
>> +    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
>> +        block->colo_cache = qemu_anon_ram_alloc(block->used_length, NULL);
>> +        if (!block->colo_cache) {
>> +            error_report("%s: Can't alloc memory for colo cache of block %s,"
>> +                         "size %zu", __func__, block->idstr,
>> +                         block->used_length);
>
> Minor one that I didn't spot before; I think that has to be RAM_ADDR_FMT instead of %zu
>

OK, I will fix it, thanks.

> However, other than that;
>
> Reviewed-by: Dr. David Alan Gilbert
>
> Dave
>
>> +            goto out_locked;
>> +        }
>> +        memcpy(block->colo_cache, block->host, block->used_length);
>> +    }
>> +    rcu_read_unlock();
>> +    ram_cache_enable = true;
>> +    return 0;
>> +
>> +out_locked:
>> +    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
>> +        if (block->colo_cache) {
>> +            qemu_anon_ram_free(block->colo_cache, block->used_length);
>> +            block->colo_cache = NULL;
>> +        }
>> +    }
>> +
>> +    rcu_read_unlock();
>> +    return -errno;
>> +}
>> +
>> +void colo_release_ram_cache(void)
>> +{
>> +    RAMBlock *block;
>> +
>> +    ram_cache_enable = false;
>> +
>> +    rcu_read_lock();
>> +    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
>> +        if (block->colo_cache) {
>> +            qemu_anon_ram_free(block->colo_cache, block->used_length);
>> +            block->colo_cache = NULL;
>> +        }
>> +    }
>> +    rcu_read_unlock();
>> +}
>> +
>>  static SaveVMHandlers savevm_ram_handlers = {
>>      .save_live_setup = ram_save_setup,
>>      .save_live_iterate = ram_save_iterate,
>> --
>> 1.8.3.1
>>
>>
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
>
> .
>
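
By the way, for the record, the allocation failure message above with Dave's suggested RAM_ADDR_FMT would look roughly like this (just the obvious change, not the final respin):

    error_report("%s: Can't alloc memory for colo cache of block %s,"
                 "size " RAM_ADDR_FMT, __func__, block->idstr,
                 block->used_length);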