Message-ID: <5500F994.3000300@huawei.com>
Date: Thu, 12 Mar 2015 10:27:32 +0800
From: zhanghailiang
References: <1423711034-5340-1-git-send-email-zhang.zhanghailiang@huawei.com> <1423711034-5340-14-git-send-email-zhang.zhanghailiang@huawei.com> <20150311200717.GS2334@work-vm>
In-Reply-To: <20150311200717.GS2334@work-vm>
Subject: Re: [Qemu-devel] [PATCH RFC v3 13/27] COLO RAM: Flush cached RAM into SVM's memory
To: "Dr. David Alan Gilbert"
Cc: hangaohuai@huawei.com, Lai Jiangshan, Li Zhijian, yunhong.jiang@intel.com, eddie.dong@intel.com, peter.huangpeng@huawei.com, qemu-devel@nongnu.org, Gonglei, stefanha@redhat.com, pbonzini@redhat.com, Yang Hongyang

On 2015/3/12 4:07, Dr. David Alan Gilbert wrote:
> * zhanghailiang (zhang.zhanghailiang@huawei.com) wrote:
>> We only need to flush RAM that is both dirty on PVM and SVM since
>> last checkpoint. Besides, we must ensure flush RAM cache before load
>> device state.
>
> Actually with a follow up to my previous question, can you explain the 'both'
> in that description.
>

The description is wrong; it should be 'any page that was dirtied by either the PVM or the SVM'. Sorry for my poor English.

> If a page was dirty on just the PVM, but not the SVM, you would have to copy
> the new PVM page into the SVM ram before executing with the newly received device
> state, otherwise the device state would be inconsistent with the RAM state.
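
To make the intended rule concrete, here is a minimal conceptual sketch in plain C (the array sizes, variable names and helper below are made up for illustration; this is not the code in this patch). A cached page is flushed into the SVM's RAM whenever it is marked dirty in either the PVM-side bitmap (pages newly received into the cache) or the SVM-side bitmap (pages the SVM wrote since the last checkpoint), so a page dirtied only by the PVM is still copied before the new device state starts running:

/*
 * Conceptual sketch only -- not the patch code. It models the flush rule:
 * a cached page is copied into the SVM's RAM if it was dirtied by the PVM
 * (newly received into the cache) OR dirtied by the SVM since the last
 * checkpoint. All names here are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE  4096
#define NUM_PAGES  4

static uint8_t svm_ram[NUM_PAGES][PAGE_SIZE];   /* the SVM's real RAM          */
static uint8_t ram_cache[NUM_PAGES][PAGE_SIZE]; /* cache of the PVM's RAM      */
static bool pvm_dirty[NUM_PAGES];               /* page received from the PVM  */
static bool svm_dirty[NUM_PAGES];               /* page written by the SVM     */

static void flush_ram_cache_sketch(void)
{
    for (int i = 0; i < NUM_PAGES; i++) {
        /* Union of the two bitmaps: dirty on either side => flush from cache. */
        if (pvm_dirty[i] || svm_dirty[i]) {
            memcpy(svm_ram[i], ram_cache[i], PAGE_SIZE);
            pvm_dirty[i] = false;
            svm_dirty[i] = false;
        }
    }
}

int main(void)
{
    memset(ram_cache, 0xAA, sizeof(ram_cache));

    pvm_dirty[0] = true;                 /* dirtied only by the PVM            */
    svm_dirty[1] = true;                 /* dirtied only by the SVM            */
    pvm_dirty[2] = svm_dirty[2] = true;  /* dirtied by both                    */
    /* page 3 is clean on both sides and keeps its current contents           */

    flush_ram_cache_sketch();

    for (int i = 0; i < NUM_PAGES; i++) {
        printf("page %d flushed from cache: %s\n", i,
               svm_ram[i][0] == 0xAA ? "yes" : "no");
    }
    return 0;
}

The patch itself realises the same union in colo_flush_ram_cache() below by walking two dirty bitmaps per RAMBlock rather than a simple per-page loop.
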
>
> Dave
>
>> Signed-off-by: zhanghailiang
>> Signed-off-by: Lai Jiangshan
>> Signed-off-by: Li Zhijian
>> Signed-off-by: Yang Hongyang
>> Signed-off-by: Gonglei
>> ---
>>  arch_init.c                        | 91 +++++++++++++++++++++++++++++++++++++-
>>  include/migration/migration-colo.h |  1 +
>>  migration/colo.c                   |  1 -
>>  3 files changed, 91 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch_init.c b/arch_init.c
>> index 4a1d825..f70de23 100644
>> --- a/arch_init.c
>> +++ b/arch_init.c
>> @@ -1100,6 +1100,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
>>  {
>>      int flags = 0, ret = 0;
>>      static uint64_t seq_iter;
>> +    bool need_flush = false;
>>
>>      seq_iter++;
>>
>> @@ -1163,6 +1164,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
>>                  break;
>>              }
>>
>> +            need_flush = true;
>>              ch = qemu_get_byte(f);
>>              ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
>>              break;
>> @@ -1174,6 +1176,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
>>                  break;
>>              }
>>
>> +            need_flush = true;
>>              qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
>>              break;
>>          case RAM_SAVE_FLAG_XBZRLE:
>> @@ -1190,6 +1193,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
>>                  ret = -EINVAL;
>>                  break;
>>              }
>> +            need_flush = true;
>>              break;
>>          case RAM_SAVE_FLAG_EOS:
>>              /* normal exit */
>> @@ -1207,7 +1211,10 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
>>              ret = qemu_file_get_error(f);
>>          }
>>      }
>> -
>> +    if (!ret && ram_cache_enable && need_flush) {
>> +        DPRINTF("Flush ram_cache\n");
>> +        colo_flush_ram_cache();
>> +    }
>>      DPRINTF("Completed load of VM with exit code %d seq iteration "
>>              "%" PRIu64 "\n", ret, seq_iter);
>>      return ret;
>> @@ -1220,6 +1227,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
>>  void create_and_init_ram_cache(void)
>>  {
>>      RAMBlock *block;
>> +    int64_t ram_cache_pages = last_ram_offset() >> TARGET_PAGE_BITS;
>>
>>      QTAILQ_FOREACH(block, &ram_list.blocks, next) {
>>          block->host_cache = g_malloc(block->used_length);
>> @@ -1227,6 +1235,14 @@ void create_and_init_ram_cache(void)
>>      }
>>
>>      ram_cache_enable = true;
>> +    /*
>> +     * Start dirty log for slave VM, we will use this dirty bitmap together with
>> +     * VM's cache RAM dirty bitmap to decide which page in cache should be
>> +     * flushed into VM's RAM.
>> +     */
>> +    migration_bitmap = bitmap_new(ram_cache_pages);
>> +    migration_dirty_pages = 0;
>> +    memory_global_dirty_log_start();
>>  }
>>
>>  void release_ram_cache(void)
>> @@ -1261,6 +1277,79 @@ static void *memory_region_get_ram_cache_ptr(MemoryRegion *mr, RAMBlock *block)
>>      return block->host_cache + (addr - block->offset);
>>  }
>>
>> +static inline
>> +ram_addr_t host_bitmap_find_and_reset_dirty(MemoryRegion *mr,
>> +                                            ram_addr_t start)
>> +{
>> +    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
>> +    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
>> +    unsigned long size = base + (int128_get64(mr->size) >> TARGET_PAGE_BITS);
>> +
>> +    unsigned long next;
>> +
>> +    next = find_next_bit(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION],
>> +                         size, nr);
>> +    if (next < size) {
>> +        clear_bit(next, ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
>> +    }
>> +    return (next - base) << TARGET_PAGE_BITS;
>> +}
>> +
>> +void colo_flush_ram_cache(void)
>> +{
>> +    RAMBlock *block = NULL;
>> +    void *dst_host;
>> +    void *src_host;
>> +    ram_addr_t ca = 0, ha = 0;
>> +    bool got_ca = 0, got_ha = 0;
>> +    int64_t host_dirty = 0, both_dirty = 0;
>> +
>> +    address_space_sync_dirty_bitmap(&address_space_memory);
>> +
>> +    block = QTAILQ_FIRST(&ram_list.blocks);
>> +    while (true) {
>> +        if (ca < block->used_length && ca <= ha) {
>> +            ca = migration_bitmap_find_and_reset_dirty(block->mr, ca);
>> +            if (ca < block->used_length) {
>> +                got_ca = 1;
>> +            }
>> +        }
>> +        if (ha < block->used_length && ha <= ca) {
>> +            ha = host_bitmap_find_and_reset_dirty(block->mr, ha);
>> +            if (ha < block->used_length && ha != ca) {
>> +                got_ha = 1;
>> +            }
>> +            host_dirty += (ha < block->used_length ? 1 : 0);
>> +            both_dirty += (ha < block->used_length && ha == ca ? 1 : 0);
>> +        }
>> +        if (ca >= block->used_length && ha >= block->used_length) {
>> +            ca = 0;
>> +            ha = 0;
>> +            block = QTAILQ_NEXT(block, next);
>> +            if (!block) {
>> +                break;
>> +            }
>> +        } else {
>> +            if (got_ha) {
>> +                got_ha = 0;
>> +                dst_host = memory_region_get_ram_ptr(block->mr) + ha;
>> +                src_host = memory_region_get_ram_cache_ptr(block->mr, block)
>> +                           + ha;
>> +                memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
>> +            }
>> +            if (got_ca) {
>> +                got_ca = 0;
>> +                dst_host = memory_region_get_ram_ptr(block->mr) + ca;
>> +                src_host = memory_region_get_ram_cache_ptr(block->mr, block)
>> +                           + ca;
>> +                memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
>> +            }
>> +        }
>> +    }
>> +
>> +    assert(migration_dirty_pages == 0);
>> +}
>> +
>>  static SaveVMHandlers savevm_ram_handlers = {
>>      .save_live_setup = ram_save_setup,
>>      .save_live_iterate = ram_save_iterate,
>> diff --git a/include/migration/migration-colo.h b/include/migration/migration-colo.h
>> index 7d43aed..2084fe2 100644
>> --- a/include/migration/migration-colo.h
>> +++ b/include/migration/migration-colo.h
>> @@ -36,5 +36,6 @@ void *colo_process_incoming_checkpoints(void *opaque);
>>  bool loadvm_in_colo_state(void);
>>  /* ram cache */
>>  void create_and_init_ram_cache(void);
>> +void colo_flush_ram_cache(void);
>>  void release_ram_cache(void);
>>  #endif
>> diff --git a/migration/colo.c b/migration/colo.c
>> index a0e1b7a..5ff2ee8 100644
>> --- a/migration/colo.c
>> +++ b/migration/colo.c
>> @@ -397,7 +397,6 @@ void *colo_process_incoming_checkpoints(void *opaque)
>>      }
>>      DPRINTF("Finish load all vm state to cache\n");
>>      qemu_mutex_unlock_iothread();
>> -    /* TODO: flush vm state */
>>
>>      ret = colo_ctl_put(ctl, COLO_CHECKPOINT_LOADED);
>>      if (ret < 0) {
>> --
>> 1.7.12.4
>>
>>
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
>
> .
>