From: Peter Xu <peterx@redhat.com>
To: qemu-devel@nongnu.org
Cc: Alexey Perevalov <a.perevalov@samsung.com>,
"Daniel P . Berrange" <berrange@redhat.com>,
Juan Quintela <quintela@redhat.com>,
Andrea Arcangeli <aarcange@redhat.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
peterx@redhat.com
Subject: [Qemu-devel] [PATCH v8 16/24] migration: synchronize dirty bitmap for resume
Date: Wed, 2 May 2018 18:47:32 +0800 [thread overview]
Message-ID: <20180502104740.12123-17-peterx@redhat.com> (raw)
In-Reply-To: <20180502104740.12123-1-peterx@redhat.com>
This patch implements the first part of core RAM resume logic for
postcopy. ram_resume_prepare() is provided for the work.
When the migration is interrupted by network failure, the dirty bitmap
on the source side will be meaningless, because even if the dirty bit is
cleared, it is still possible that the sent page was lost along the way
to the destination. Here, instead of continuing the migration with the old
dirty bitmap on source, we ask the destination side to send back its
received bitmap, then invert it to be our initial dirty bitmap.
The source side send thread will issue the MIG_CMD_RECV_BITMAP requests,
once per ramblock, to ask for the received bitmap. On destination side,
MIG_RP_MSG_RECV_BITMAP will be issued, along with the requested bitmap.
Data will be received on the return-path thread of source, and the main
migration thread will be notified when all the ramblock bitmaps are
synchronized.
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
migration/migration.h | 1 +
migration/migration.c | 2 ++
migration/ram.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
migration/trace-events | 4 ++++
4 files changed, 54 insertions(+)
diff --git a/migration/migration.h b/migration/migration.h
index e8f0afb8eb..2f0dc34ae9 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -135,6 +135,7 @@ struct MigrationState
QEMUFile *from_dst_file;
QemuThread rp_thread;
bool error;
+ QemuSemaphore rp_sem;
} rp_state;
double mbps;
diff --git a/migration/migration.c b/migration/migration.c
index a8c50783f7..960827d3ea 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2962,6 +2962,7 @@ static void migration_instance_finalize(Object *obj)
qemu_sem_destroy(&ms->pause_sem);
qemu_sem_destroy(&ms->postcopy_pause_sem);
qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
+ qemu_sem_destroy(&ms->rp_state.rp_sem);
error_free(ms->error);
}
@@ -2994,6 +2995,7 @@ static void migration_instance_init(Object *obj)
qemu_sem_init(&ms->postcopy_pause_sem, 0);
qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
+ qemu_sem_init(&ms->rp_state.rp_sem, 0);
}
/*
diff --git a/migration/ram.c b/migration/ram.c
index 3512383991..118460109b 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -51,6 +51,7 @@
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "migration/block.h"
+#include "savevm.h"
/***********************************************************/
/* ram save/restore */
@@ -3164,6 +3165,38 @@ static bool ram_has_postcopy(void *opaque)
return migrate_postcopy_ram();
}
+/* Sync all the dirty bitmap with destination VM. */
+static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
+{
+ RAMBlock *block;
+ QEMUFile *file = s->to_dst_file;
+ int ramblock_count = 0;
+
+ trace_ram_dirty_bitmap_sync_start();
+
+ RAMBLOCK_FOREACH(block) {
+ qemu_savevm_send_recv_bitmap(file, block->idstr);
+ trace_ram_dirty_bitmap_request(block->idstr);
+ ramblock_count++;
+ }
+
+ trace_ram_dirty_bitmap_sync_wait();
+
+ /* Wait until all the ramblocks' dirty bitmap synced */
+ while (ramblock_count--) {
+ qemu_sem_wait(&s->rp_state.rp_sem);
+ }
+
+ trace_ram_dirty_bitmap_sync_complete();
+
+ return 0;
+}
+
+static void ram_dirty_bitmap_reload_notify(MigrationState *s)
+{
+ qemu_sem_post(&s->rp_state.rp_sem);
+}
+
/*
* Read the received bitmap, revert it as the initial dirty bitmap.
* This is only used when the postcopy migration is paused but wants
@@ -3238,12 +3271,25 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
trace_ram_dirty_bitmap_reload_complete(block->idstr);
+ /*
+ * We succeeded to sync bitmap for current ramblock. If this is
+ * the last one to sync, we need to notify the main send thread.
+ */
+ ram_dirty_bitmap_reload_notify(s);
+
ret = 0;
out:
free(le_bitmap);
return ret;
}
+static int ram_resume_prepare(MigrationState *s, void *opaque)
+{
+ RAMState *rs = *(RAMState **)opaque;
+
+ return ram_dirty_bitmap_sync_all(s, rs);
+}
+
static SaveVMHandlers savevm_ram_handlers = {
.save_setup = ram_save_setup,
.save_live_iterate = ram_save_iterate,
@@ -3255,6 +3301,7 @@ static SaveVMHandlers savevm_ram_handlers = {
.save_cleanup = ram_save_cleanup,
.load_setup = ram_load_setup,
.load_cleanup = ram_load_cleanup,
+ .resume_prepare = ram_resume_prepare,
};
void ram_mig_init(void)
diff --git a/migration/trace-events b/migration/trace-events
index be36fbccfe..53243e17ec 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -82,8 +82,12 @@ ram_load_postcopy_loop(uint64_t addr, int flags) "@%" PRIx64 " %x"
ram_postcopy_send_discard_bitmap(void) ""
ram_save_page(const char *rbname, uint64_t offset, void *host) "%s: offset: 0x%" PRIx64 " host: %p"
ram_save_queue_pages(const char *rbname, size_t start, size_t len) "%s: start: 0x%zx len: 0x%zx"
+ram_dirty_bitmap_request(char *str) "%s"
ram_dirty_bitmap_reload_begin(char *str) "%s"
ram_dirty_bitmap_reload_complete(char *str) "%s"
+ram_dirty_bitmap_sync_start(void) ""
+ram_dirty_bitmap_sync_wait(void) ""
+ram_dirty_bitmap_sync_complete(void) ""
# migration/migration.c
await_return_path_close_on_source_close(void) ""
--
2.14.3
next prev parent reply other threads:[~2018-05-02 10:48 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-05-02 10:47 [Qemu-devel] [PATCH v8 00/24] Migration: postcopy failure recovery Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 01/24] migration: let incoming side use thread context Peter Xu
2018-05-08 14:36 ` Juan Quintela
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 02/24] migration: new postcopy-pause state Peter Xu
2018-05-08 15:16 ` Juan Quintela
2018-05-09 4:05 ` Peter Xu
2018-05-09 6:59 ` Juan Quintela
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 03/24] migration: implement "postcopy-pause" src logic Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 04/24] migration: allow dst vm pause on postcopy Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 05/24] migration: allow src return path to pause Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 06/24] migration: allow fault thread " Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 07/24] qmp: hmp: add migrate "resume" option Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 08/24] migration: rebuild channel on source Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 09/24] migration: new state "postcopy-recover" Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 10/24] migration: wakeup dst ram-load-thread for recover Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 11/24] migration: new cmd MIG_CMD_RECV_BITMAP Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 12/24] migration: new message MIG_RP_MSG_RECV_BITMAP Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 13/24] migration: new cmd MIG_CMD_POSTCOPY_RESUME Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 14/24] migration: new message MIG_RP_MSG_RESUME_ACK Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 15/24] migration: introduce SaveVMHandlers.resume_prepare Peter Xu
2018-05-02 10:47 ` Peter Xu [this message]
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 17/24] migration: setup ramstate for resume Peter Xu
2018-05-08 10:54 ` Dr. David Alan Gilbert
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 18/24] migration: final handshake for the resume Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 19/24] migration: init dst in migration_object_init too Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 20/24] qmp/migration: new command migrate-recover Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 21/24] hmp/migration: add migrate_recover command Peter Xu
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 22/24] migration: introduce lock for to_dst_file Peter Xu
2018-05-08 11:31 ` Dr. David Alan Gilbert
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 23/24] migration/qmp: add command migrate-pause Peter Xu
2018-05-08 13:20 ` Dr. David Alan Gilbert
2018-05-02 10:47 ` [Qemu-devel] [PATCH v8 24/24] migration/hmp: add migrate_pause command Peter Xu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180502104740.12123-17-peterx@redhat.com \
--to=peterx@redhat.com \
--cc=a.perevalov@samsung.com \
--cc=aarcange@redhat.com \
--cc=berrange@redhat.com \
--cc=dgilbert@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).