From: Peter Xu <peterx@redhat.com>
To: qemu-devel@nongnu.org
Cc: peterx@redhat.com, Fabiano Rosas <farosas@suse.de>,
Juan Quintela <quintela@redhat.com>,
Joao Martins <joao.m.martins@oracle.com>
Subject: [PATCH 3/3] migration: Add per vmstate downtime tracepoints
Date: Thu, 26 Oct 2023 11:53:37 -0400
Message-ID: <20231026155337.596281-4-peterx@redhat.com>
In-Reply-To: <20231026155337.596281-1-peterx@redhat.com>
We have a bunch of savevm_section* tracepoints.  They're good for analyzing
the migration stream, but not always suitable when one wants to analyze
migration downtime.  There are two major problems:

  - The savevm_section* tracepoints dump all sections, while we only care
    about the sections that contribute to the downtime

  - They don't carry an identifier for the type of section, so there is no
    easy way to filter the downtime information either (see the example
    below)
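As a concrete illustration (the format string is quoted from memory and may
differ slightly from what's in trace-events; this only shows the shape of
the output), a savevm_section_start line today looks roughly like:

  savevm_section_start ram, section_id 3

It fires for every section and carries no hint of whether the section is a
SECTION_FULL or SECTION_END, i.e. whether it lands in the downtime window.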
We could add the type into those tracepoints, but instead of doing so, this
patch keeps them untouched and adds a set of downtime-specific tracepoints,
so one can enable the "vmstate_downtime*" tracepoints and get a full picture
of how the downtime is distributed across iterative and non-iterative
vmstate save/load.
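For example (the invocation is illustrative; the exact option spelling
depends on how tracing was configured in the build), the new events can be
enabled on both source and destination QEMU with something like:

  $ qemu-system-x86_64 ... -trace "vmstate_downtime*"

On builds with a suitable trace backend, the same pattern should also be
togglable at runtime via the HMP "trace-event" command.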
Note that both save() and load() need to be traced here, because both of
them may contribute to the downtime.  The contribution is not a simple "add
them together", though: the source can be doing a save() of device1 while
the destination is already load()ing device2, so the two happen
concurrently.
Tracking both sides still makes sense because device load() and save() can
be imbalanced: one device can save() very fast but load() very slowly, or
vice versa.  We can't figure that out without tracing both.
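With the events enabled, each traced section produces one line per side.
The numbers below are made up purely for illustration, but the fields follow
the format strings added to trace-events in this patch (downtime is in
microseconds, since the timestamps come from qemu_clock_get_us()):

  vmstate_downtime_save type=iterable idstr=ram instance_id=0 downtime=6524
  vmstate_downtime_save type=non-iterable idstr=0000:00:02.0/virtio-net instance_id=0 downtime=312
  vmstate_downtime_load type=non-iterable idstr=0000:00:02.0/virtio-net instance_id=0 downtime=975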
Signed-off-by: Peter Xu <peterx@redhat.com>
---
migration/savevm.c | 49 ++++++++++++++++++++++++++++++++++++++----
migration/trace-events | 2 ++
2 files changed, 47 insertions(+), 4 deletions(-)
diff --git a/migration/savevm.c b/migration/savevm.c
index 8622f229e5..cd6d6ba493 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1459,6 +1459,7 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f)
static
int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
{
+ int64_t start_ts_each, end_ts_each;
SaveStateEntry *se;
int ret;
@@ -1475,6 +1476,8 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
continue;
}
}
+
+ start_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
trace_savevm_section_start(se->idstr, se->section_id);
save_section_header(f, se, QEMU_VM_SECTION_END);
@@ -1486,6 +1489,9 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
qemu_file_set_error(f, ret);
return -1;
}
+ end_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+ trace_vmstate_downtime_save("iterable", se->idstr, se->instance_id,
+ end_ts_each - start_ts_each);
}
return 0;
@@ -1496,6 +1502,7 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
bool inactivate_disks)
{
MigrationState *ms = migrate_get_current();
+ int64_t start_ts_each, end_ts_each;
JSONWriter *vmdesc = ms->vmdesc;
int vmdesc_len;
SaveStateEntry *se;
@@ -1507,11 +1514,17 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
continue;
}
+ start_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+
ret = vmstate_save(f, se, vmdesc);
if (ret) {
qemu_file_set_error(f, ret);
return ret;
}
+
+ end_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+ trace_vmstate_downtime_save("non-iterable", se->idstr, se->instance_id,
+ end_ts_each - start_ts_each);
}
if (inactivate_disks) {
@@ -2506,9 +2519,12 @@ static bool check_section_footer(QEMUFile *f, SaveStateEntry *se)
}
static int
-qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
+qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis,
+ uint8_t type)
{
+ bool trace_downtime = (type == QEMU_VM_SECTION_FULL);
uint32_t instance_id, version_id, section_id;
+ int64_t start_ts, end_ts;
SaveStateEntry *se;
char idstr[256];
int ret;
@@ -2557,12 +2573,23 @@ qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
return -EINVAL;
}
+ if (trace_downtime) {
+ start_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+ }
+
ret = vmstate_load(f, se);
if (ret < 0) {
error_report("error while loading state for instance 0x%"PRIx32" of"
" device '%s'", instance_id, idstr);
return ret;
}
+
+ if (trace_downtime) {
+ end_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+ trace_vmstate_downtime_load("non-iterable", se->idstr,
+ se->instance_id, end_ts - start_ts);
+ }
+
if (!check_section_footer(f, se)) {
return -EINVAL;
}
@@ -2571,8 +2598,11 @@ qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
}
static int
-qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
+qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis,
+ uint8_t type)
{
+ bool trace_downtime = (type == QEMU_VM_SECTION_END);
+ int64_t start_ts, end_ts;
uint32_t section_id;
SaveStateEntry *se;
int ret;
@@ -2597,12 +2627,23 @@ qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
return -EINVAL;
}
+ if (trace_downtime) {
+ start_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+ }
+
ret = vmstate_load(f, se);
if (ret < 0) {
error_report("error while loading state section id %d(%s)",
section_id, se->idstr);
return ret;
}
+
+ if (trace_downtime) {
+ end_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+ trace_vmstate_downtime_load("iterable", se->idstr,
+ se->instance_id, end_ts - start_ts);
+ }
+
if (!check_section_footer(f, se)) {
return -EINVAL;
}
@@ -2791,14 +2832,14 @@ retry:
switch (section_type) {
case QEMU_VM_SECTION_START:
case QEMU_VM_SECTION_FULL:
- ret = qemu_loadvm_section_start_full(f, mis);
+ ret = qemu_loadvm_section_start_full(f, mis, section_type);
if (ret < 0) {
goto out;
}
break;
case QEMU_VM_SECTION_PART:
case QEMU_VM_SECTION_END:
- ret = qemu_loadvm_section_part_end(f, mis);
+ ret = qemu_loadvm_section_part_end(f, mis, section_type);
if (ret < 0) {
goto out;
}
diff --git a/migration/trace-events b/migration/trace-events
index fa9486dffe..5820add1f3 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -48,6 +48,8 @@ savevm_state_cleanup(void) ""
savevm_state_complete_precopy(void) ""
vmstate_save(const char *idstr, const char *vmsd_name) "%s, %s"
vmstate_load(const char *idstr, const char *vmsd_name) "%s, %s"
+vmstate_downtime_save(const char *type, const char *idstr, uint32_t instance_id, int64_t downtime) "type=%s idstr=%s instance_id=%d downtime=%"PRIi64
+vmstate_downtime_load(const char *type, const char *idstr, uint32_t instance_id, int64_t downtime) "type=%s idstr=%s instance_id=%d downtime=%"PRIi64
postcopy_pause_incoming(void) ""
postcopy_pause_incoming_continued(void) ""
postcopy_page_req_sync(void *host_addr) "sync page req %p"
--
2.41.0