From: zhanghailiang
Date: Wed, 2 Sep 2015 16:23:00 +0800
Message-ID: <1441182199-8328-14-git-send-email-zhang.zhanghailiang@huawei.com>
In-Reply-To: <1441182199-8328-1-git-send-email-zhang.zhanghailiang@huawei.com>
References: <1441182199-8328-1-git-send-email-zhang.zhanghailiang@huawei.com>
MIME-Version: 1.0
Content-Type: text/plain
Subject: [Qemu-devel] [PATCH COLO-Frame v9 13/32] COLO: Load PVM's dirty pages into SVM's RAM cache temporarily
To: qemu-devel@nongnu.org
Cc: lizhijian@cn.fujitsu.com, quintela@redhat.com, yunhong.jiang@intel.com,
    eddie.dong@intel.com, peter.huangpeng@huawei.com, dgilbert@redhat.com,
    arei.gonglei@huawei.com, stefanha@redhat.com, amit.shah@redhat.com,
    yanghy@cn.fujitsu.com, zhanghailiang

We should not load PVM's state directly into SVM, because an error may
occur while SVM is receiving data, which would break SVM. We need to
ensure that all data has been received before loading the state into
SVM, so we use extra memory to cache this data (PVM's RAM).

The ram cache on the secondary side is initially identical to SVM/PVM's
memory. During each checkpoint we first cache PVM's dirty pages in this
ram cache, so the ram cache always matches PVM's memory at every
checkpoint; after all of PVM's state has been received, we flush the
cached ram into SVM.
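Purely for illustration, and not part of the patch below, here is a minimal,
self-contained sketch of the two steps described above: while a checkpoint is
being received, each incoming page is written into the ram cache and recorded
in a dirty bitmap; once the whole checkpoint has arrived, the dirty pages are
flushed from the cache into the SVM's real RAM. The names cache_block,
ram_cache_receive_page and ram_cache_flush are invented for this example; in
the patch itself the flush step is still a TODO.

/*
 * Illustrative sketch only (not QEMU code): a simplified model of the
 * "cache on receive, flush after checkpoint" scheme described above.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE     4096UL
#define BITS_PER_LONG (8 * sizeof(unsigned long))

typedef struct cache_block {
    uint8_t *host;          /* SVM's real RAM */
    uint8_t *host_cache;    /* cached copy holding PVM's pages */
    unsigned long *dirty;   /* one bit per page, set while receiving */
    size_t used_length;     /* bytes, page aligned */
} cache_block;

/* Receive one page from the PVM: store it in the cache and mark it dirty. */
static void ram_cache_receive_page(cache_block *b, size_t page,
                                   const uint8_t *data)
{
    memcpy(b->host_cache + page * PAGE_SIZE, data, PAGE_SIZE);
    b->dirty[page / BITS_PER_LONG] |= 1UL << (page % BITS_PER_LONG);
}

/* Copy every dirty page from the cache into real RAM, clearing its bit. */
static void ram_cache_flush(cache_block *b)
{
    size_t pages = b->used_length / PAGE_SIZE;

    for (size_t i = 0; i < pages; i++) {
        size_t word = i / BITS_PER_LONG;
        unsigned long mask = 1UL << (i % BITS_PER_LONG);

        if (b->dirty[word] & mask) {
            b->dirty[word] &= ~mask;
            memcpy(b->host + i * PAGE_SIZE,
                   b->host_cache + i * PAGE_SIZE, PAGE_SIZE);
        }
    }
}

int main(void)
{
    cache_block b = { .used_length = 4 * PAGE_SIZE };
    size_t pages = b.used_length / PAGE_SIZE;
    size_t words = (pages + BITS_PER_LONG - 1) / BITS_PER_LONG;
    uint8_t page[PAGE_SIZE];

    b.host = calloc(1, b.used_length);
    b.host_cache = calloc(1, b.used_length);
    b.dirty = calloc(words, sizeof(unsigned long));

    /* Pretend page 2 arrived from the PVM during this checkpoint. */
    memset(page, 0xab, PAGE_SIZE);
    ram_cache_receive_page(&b, 2, page);

    /* After the whole checkpoint is in, flush the cache into real RAM. */
    ram_cache_flush(&b);
    printf("page 2, byte 0 after flush: 0x%02x\n", b.host[2 * PAGE_SIZE]);

    free(b.host);
    free(b.host_cache);
    free(b.dirty);
    return 0;
}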
Signed-off-by: zhanghailiang
Signed-off-by: Yang Hongyang
Signed-off-by: Li Zhijian
Signed-off-by: Gonglei
---
 include/exec/cpu-all.h   |  1 +
 include/migration/colo.h |  3 ++
 migration/colo.c         | 14 ++++++--
 migration/ram.c          | 93 ++++++++++++++++++++++++++++++++++++++++++++++--
 4 files changed, 107 insertions(+), 4 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 89db792..d1c1af9 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -274,6 +274,7 @@ struct RAMBlock {
     struct rcu_head rcu;
     struct MemoryRegion *mr;
     uint8_t *host;
+    uint8_t *host_cache; /* For colo, VM's ram cache */
     ram_addr_t offset;
     ram_addr_t used_length;
     ram_addr_t max_length;
diff --git a/include/migration/colo.h b/include/migration/colo.h
index 58849f7..b8a536e 100644
--- a/include/migration/colo.h
+++ b/include/migration/colo.h
@@ -29,4 +29,7 @@ bool migration_incoming_enable_colo(void);
 void migration_incoming_exit_colo(void);
 void *colo_process_incoming_thread(void *opaque);
 bool migration_incoming_in_colo_state(void);
+/* ram cache */
+int colo_init_ram_cache(void);
+void colo_release_ram_cache(void);
 #endif
diff --git a/migration/colo.c b/migration/colo.c
index f107032..452a77b 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -330,6 +330,12 @@ void *colo_process_incoming_thread(void *opaque)
         goto out;
     }
 
+    ret = colo_init_ram_cache();
+    if (ret < 0) {
+        error_report("Failed to initialize ram cache");
+        goto out;
+    }
+
     ret = colo_ctl_put(mis->to_src_file, COLO_CMD_CHECKPOINT_READY, 0);
     if (ret < 0) {
         goto out;
@@ -357,14 +363,14 @@ void *colo_process_incoming_thread(void *opaque)
             goto out;
         }
 
-        /* TODO: read migration data into colo buffer */
+        /*TODO Load VM state */
 
         ret = colo_ctl_put(mis->to_src_file, COLO_CMD_VMSTATE_RECEIVED, 0);
        if (ret < 0) {
             goto out;
         }
 
-        /* TODO: load vm state */
+        /* TODO: flush vm state */
         ret = colo_ctl_put(mis->to_src_file, COLO_CMD_VMSTATE_LOADED, 0);
         if (ret < 0) {
             goto out;
@@ -378,6 +384,10 @@ out:
                      strerror(-ret));
     }
 
+    qemu_mutex_lock_iothread();
+    colo_release_ram_cache();
+    qemu_mutex_unlock_iothread();
+
     if (mis->to_src_file) {
         qemu_fclose(mis->to_src_file);
     }
diff --git a/migration/ram.c b/migration/ram.c
index 68980be..f3b94f2 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -225,6 +225,7 @@ static ram_addr_t last_offset;
 static unsigned long *migration_bitmap;
 static QemuMutex migration_bitmap_mutex;
 static uint64_t migration_dirty_pages;
+static bool ram_cache_enable;
 static uint32_t last_version;
 static bool ram_bulk_stage;
 
@@ -1353,6 +1354,8 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
     return 0;
 }
 
+static void *memory_region_get_ram_cache_ptr(MemoryRegion *mr, RAMBlock *block);
+
 /* Must be called from within a rcu critical section.
  * Returns a pointer from within the RCU-protected ram_list.
  */
@@ -1370,7 +1373,20 @@ static inline void *host_from_stream_offset(QEMUFile *f,
             return NULL;
         }
 
-        return memory_region_get_ram_ptr(block->mr) + offset;
+        if (ram_cache_enable) {
+            /*
+             * During colo checkpoint, we need bitmap of these migrated pages.
+             * It help us to decide which pages in ram cache should be flushed
+             * into VM's RAM later.
+             */
+            long k = (block->mr->ram_addr + offset) >> TARGET_PAGE_BITS;
+            if (!test_and_set_bit(k, migration_bitmap)) {
+                migration_dirty_pages++;
+            }
+            return memory_region_get_ram_cache_ptr(block->mr, block) + offset;
+        } else {
+            return memory_region_get_ram_ptr(block->mr) + offset;
+        }
     }
 
     len = qemu_get_byte(f);
@@ -1380,7 +1396,16 @@ static inline void *host_from_stream_offset(QEMUFile *f,
     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
         if (!strncmp(id, block->idstr, sizeof(id)) &&
             block->max_length > offset) {
-            return memory_region_get_ram_ptr(block->mr) + offset;
+            if (ram_cache_enable) {
+                long k = (block->mr->ram_addr + offset) >> TARGET_PAGE_BITS;
+                if (!test_and_set_bit(k, migration_bitmap)) {
+                    migration_dirty_pages++;
+                }
+                return memory_region_get_ram_cache_ptr(block->mr, block) +
+                       offset;
+            } else {
+                return memory_region_get_ram_ptr(block->mr) + offset;
+            }
         }
     }
 
@@ -1631,6 +1656,70 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
     return ret;
 }
 
+/*
+ * colo cache: this is for secondary VM, we cache the whole
+ * memory of the secondary VM, it will be called after first migration.
+ */
+int colo_init_ram_cache(void)
+{
+    RAMBlock *block;
+
+    rcu_read_lock();
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        block->host_cache = qemu_anon_ram_alloc(block->used_length, NULL);
+        if (!block->host_cache) {
+            goto out_locked;
+        }
+        memcpy(block->host_cache, block->host, block->used_length);
+    }
+    rcu_read_unlock();
+    ram_cache_enable = true;
+    return 0;
+
+out_locked:
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        if (block->host_cache) {
+            qemu_anon_ram_free(block->host_cache, block->used_length);
+            block->host_cache = NULL;
+        }
+    }
+
+    rcu_read_unlock();
+    return -errno;
+}
+
+void colo_release_ram_cache(void)
+{
+    RAMBlock *block;
+
+    ram_cache_enable = false;
+
+    rcu_read_lock();
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        if (block->host_cache) {
+            qemu_anon_ram_free(block->host_cache, block->used_length);
+            block->host_cache = NULL;
+        }
+    }
+    rcu_read_unlock();
+}
+
+static void *memory_region_get_ram_cache_ptr(MemoryRegion *mr, RAMBlock *block)
+{
+    if (mr->alias) {
+        return memory_region_get_ram_cache_ptr(mr->alias, block) +
+               mr->alias_offset;
+    }
+
+    assert(mr->terminates);
+
+    ram_addr_t addr = mr->ram_addr & TARGET_PAGE_MASK;
+
+    assert(addr - block->offset < block->used_length);
+
+    return block->host_cache + (addr - block->offset);
+}
+
 static SaveVMHandlers savevm_ram_handlers = {
     .save_live_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,
--
1.8.3.1