From: zhanghailiang <zhang.zhanghailiang@huawei.com>
To: qemu-devel@nongnu.org
Cc: lizhijian@cn.fujitsu.com, quintela@redhat.com,
yunhong.jiang@intel.com, eddie.dong@intel.com,
peter.huangpeng@huawei.com, dgilbert@redhat.com,
arei.gonglei@huawei.com, amit.shah@redhat.com,
Lai Jiangshan <laijs@cn.fujitsu.com>,
Yang Hongyang <yanghy@cn.fujitsu.com>,
zhanghailiang <zhang.zhanghailiang@huawei.com>
Subject: [Qemu-devel] [PATCH COLO-Frame v7 14/34] COLO RAM: Flush cached RAM into SVM's memory
Date: Thu, 9 Jul 2015 11:16:22 +0800 [thread overview]
Message-ID: <1436411802-181876-15-git-send-email-zhang.zhanghailiang@huawei.com> (raw)
In-Reply-To: <1436411802-181876-1-git-send-email-zhang.zhanghailiang@huawei.com>
During the time of VM's running, PVM/SVM may dirty some pages, we will transfer
PVM's dirty pages to SVM and store them into SVM's RAM cache at next checkpoint
time. So, the content of SVM's RAM cache will always be the same as PVM's memory
after the checkpoint.
Instead of flushing all content of SVM's RAM cache into SVM's MEMORY,
we do this in a more efficient way:
Only flush pages that have been dirtied by PVM or SVM since the last checkpoint.
In this way, we ensure that SVM's memory stays the same as PVM's.
Besides, we must ensure that the RAM cache is flushed before loading the device state.
Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Signed-off-by: Yang Hongyang <yanghy@cn.fujitsu.com>
Signed-off-by: Gonglei <arei.gonglei@huawei.com>
---
include/migration/colo.h | 1 +
migration/colo.c | 2 --
migration/ram.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 93 insertions(+), 2 deletions(-)
diff --git a/include/migration/colo.h b/include/migration/colo.h
index 2fa9c73..c01364e 100644
--- a/include/migration/colo.h
+++ b/include/migration/colo.h
@@ -37,5 +37,6 @@ void *colo_process_incoming_checkpoints(void *opaque);
bool loadvm_in_colo_state(void);
/* ram cache */
int create_and_init_ram_cache(void);
+void colo_flush_ram_cache(void);
void release_ram_cache(void);
#endif
diff --git a/migration/colo.c b/migration/colo.c
index ba83966..ece8cb4 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -414,8 +414,6 @@ void *colo_process_incoming_checkpoints(void *opaque)
}
qemu_mutex_unlock_iothread();
- /* TODO: flush vm state */
-
ret = colo_ctl_put(ctl, COLO_CHECKPOINT_LOADED);
if (ret < 0) {
goto out;
diff --git a/migration/ram.c b/migration/ram.c
index 7a1a1b6..d6593f3 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1522,6 +1522,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
int flags = 0, ret = 0;
static uint64_t seq_iter;
int len = 0;
+ bool need_flush = false;
seq_iter++;
@@ -1590,6 +1591,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
ret = -EINVAL;
break;
}
+
+ need_flush = true;
ch = qemu_get_byte(f);
ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
break;
@@ -1600,6 +1603,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
ret = -EINVAL;
break;
}
+
+ need_flush = true;
qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
break;
case RAM_SAVE_FLAG_COMPRESS_PAGE:
@@ -1632,6 +1637,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
ret = -EINVAL;
break;
}
+ need_flush = true;
break;
case RAM_SAVE_FLAG_EOS:
/* normal exit */
@@ -1651,6 +1657,11 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
}
rcu_read_unlock();
+
+ if (!ret && ram_cache_enable && need_flush) {
+ DPRINTF("Flush ram_cache\n");
+ colo_flush_ram_cache();
+ }
DPRINTF("Completed load of VM with exit code %d seq iteration "
"%" PRIu64 "\n", ret, seq_iter);
return ret;
@@ -1736,6 +1747,87 @@ static void *memory_region_get_ram_cache_ptr(MemoryRegion *mr, RAMBlock *block)
return block->host_cache + (addr - block->offset);
}
+/* fix me: should this helper function be merged with
+ * migration_bitmap_find_and_reset_dirty ?
+ */
+static inline
+ram_addr_t host_bitmap_find_and_reset_dirty(MemoryRegion *mr,
+ ram_addr_t start)
+{
+ unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
+ unsigned long nr = base + (start >> TARGET_PAGE_BITS);
+ uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
+ unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
+
+ unsigned long next;
+
+ next = find_next_bit(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION],
+ size, nr);
+ if (next < size) {
+ clear_bit(next, ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+ }
+ return (next - base) << TARGET_PAGE_BITS;
+}
+
+/*
+* Flush content of RAM cache into SVM's memory.
+* Only flush the pages that be dirtied by PVM or SVM or both.
+*/
+void colo_flush_ram_cache(void)
+{
+ RAMBlock *block = NULL;
+ void *dst_host;
+ void *src_host;
+ ram_addr_t ca = 0, ha = 0;
+ bool got_ca = 0, got_ha = 0;
+ int64_t host_dirty = 0, both_dirty = 0;
+
+ address_space_sync_dirty_bitmap(&address_space_memory);
+ rcu_read_lock();
+ block = QLIST_FIRST_RCU(&ram_list.blocks);
+ while (true) {
+ if (ca < block->used_length && ca <= ha) {
+ ca = migration_bitmap_find_and_reset_dirty(block->mr, ca);
+ if (ca < block->used_length) {
+ got_ca = 1;
+ }
+ }
+ if (ha < block->used_length && ha <= ca) {
+ ha = host_bitmap_find_and_reset_dirty(block->mr, ha);
+ if (ha < block->used_length && ha != ca) {
+ got_ha = 1;
+ }
+ host_dirty += (ha < block->used_length ? 1 : 0);
+ both_dirty += (ha < block->used_length && ha == ca ? 1 : 0);
+ }
+ if (ca >= block->used_length && ha >= block->used_length) {
+ ca = 0;
+ ha = 0;
+ block = QLIST_NEXT_RCU(block, next);
+ if (!block) {
+ break;
+ }
+ } else {
+ if (got_ha) {
+ got_ha = 0;
+ dst_host = memory_region_get_ram_ptr(block->mr) + ha;
+ src_host = memory_region_get_ram_cache_ptr(block->mr, block)
+ + ha;
+ memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+ }
+ if (got_ca) {
+ got_ca = 0;
+ dst_host = memory_region_get_ram_ptr(block->mr) + ca;
+ src_host = memory_region_get_ram_cache_ptr(block->mr, block)
+ + ca;
+ memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+ }
+ }
+ }
+ rcu_read_unlock();
+ assert(migration_dirty_pages == 0);
+}
+
static SaveVMHandlers savevm_ram_handlers = {
.save_live_setup = ram_save_setup,
.save_live_iterate = ram_save_iterate,
--
1.7.12.4
next prev parent reply other threads:[~2015-07-09 3:18 UTC|newest]
Thread overview: 37+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-07-09 3:16 [Qemu-devel] [PATCH COLO-Frame v7 00/34] COarse-grain LOck-stepping(COLO) Virtual Machines for Non-stop Service (FT) zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 01/34] configure: Add parameter for configure to enable/disable COLO support zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 02/34] migration: Introduce capability 'colo' to migration zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 03/34] COLO: migrate colo related info to slave zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 04/34] colo-comm/migration: skip colo info section for special cases zhanghailiang
2015-07-17 17:07 ` Dr. David Alan Gilbert
2015-07-20 8:42 ` zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 05/34] migration: Integrate COLO checkpoint process into migration zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 06/34] migration: Integrate COLO checkpoint process into loadvm zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 07/34] COLO: Implement colo checkpoint protocol zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 08/34] COLO: Add a new RunState RUN_STATE_COLO zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 09/34] QEMUSizedBuffer: Introduce two help functions for qsb zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 10/34] COLO: Save VM state to slave when do checkpoint zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 11/34] COLO RAM: Load PVM's dirty page into SVM's RAM cache temporarily zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 12/34] COLO VMstate: Load VM state into qsb before restore it zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 13/34] arch_init: Start to trace dirty pages of SVM zhanghailiang
2015-07-09 3:16 ` zhanghailiang [this message]
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 15/34] COLO failover: Introduce a new command to trigger a failover zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 16/34] COLO failover: Introduce state to record failover process zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 17/34] COLO failover: Implement COLO primary/secondary vm failover work zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 18/34] qmp event: Add event notification for COLO error zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 19/34] COLO failover: Don't do failover during loading VM's state zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 20/34] COLO: Add new command parameter 'forward_nic' 'colo_script' for net zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 21/34] COLO NIC: Init/remove colo nic devices when add/cleanup tap devices zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 22/34] tap: Make launch_script() public zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 23/34] COLO NIC: Implement colo nic device interface configure() zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 24/34] colo-nic: Handle secondary VM's original net device configure zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 25/34] COLO NIC: Implement colo nic init/destroy function zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 26/34] COLO NIC: Some init work related with proxy module zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 27/34] COLO: Handle nfnetlink message from " zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 28/34] COLO: Do checkpoint according to the result of packets comparation zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 29/34] COLO: Improve checkpoint efficiency by do additional periodic checkpoint zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 30/34] COLO: Add colo-set-checkpoint-period command zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 31/34] COLO NIC: Implement NIC checkpoint and failover zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 32/34] COLO: Disable qdev hotplug when VM is in COLO mode zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 33/34] COLO: Implement shutdown checkpoint zhanghailiang
2015-07-09 3:16 ` [Qemu-devel] [PATCH COLO-Frame v7 34/34] COLO: Add block replication into colo process zhanghailiang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1436411802-181876-15-git-send-email-zhang.zhanghailiang@huawei.com \
--to=zhang.zhanghailiang@huawei.com \
--cc=amit.shah@redhat.com \
--cc=arei.gonglei@huawei.com \
--cc=dgilbert@redhat.com \
--cc=eddie.dong@intel.com \
--cc=laijs@cn.fujitsu.com \
--cc=lizhijian@cn.fujitsu.com \
--cc=peter.huangpeng@huawei.com \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=yanghy@cn.fujitsu.com \
--cc=yunhong.jiang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).