From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xen.org, Ian.Campbell@citrix.com,
ian.jackson@eu.citrix.com, stefano.stabellini@eu.citrix.com,
wei.liu2@citrix.com, andrew.cooper3@citrix.com
Cc: Juergen Gross <jgross@suse.com>
Subject: [PATCH v4 3/4] libxc: stop migration in case of p2m list structural changes
Date: Thu, 7 Jan 2016 13:36:53 +0100 [thread overview]
Message-ID: <1452170214-17821-4-git-send-email-jgross@suse.com> (raw)
In-Reply-To: <1452170214-17821-1-git-send-email-jgross@suse.com>
With support of the virtual mapped linear p2m list for migration it is
now possible to detect structural changes of the p2m list which before
would either lead to a crashing or an otherwise wrongly behaving domU.
A guest supporting the linear p2m list will increment the
p2m_generation counter located in the shared info page before and after
each modification of a mapping related to the p2m list. A change of
that counter can be detected by the tools and reacted upon.
As such a change should occur only very rarely once the domU is up, the
simplest reaction is to cancel the migration in such an event.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
---
tools/libxc/xc_sr_common.h | 12 +++++++++++
tools/libxc/xc_sr_save.c | 7 ++++++-
tools/libxc/xc_sr_save_x86_hvm.c | 7 +++++++
tools/libxc/xc_sr_save_x86_pv.c | 45 ++++++++++++++++++++++++++++++++++++++++
4 files changed, 70 insertions(+), 1 deletion(-)
diff --git a/tools/libxc/xc_sr_common.h b/tools/libxc/xc_sr_common.h
index 9aecde2..60b43e8 100644
--- a/tools/libxc/xc_sr_common.h
+++ b/tools/libxc/xc_sr_common.h
@@ -83,6 +83,15 @@ struct xc_sr_save_ops
int (*end_of_checkpoint)(struct xc_sr_context *ctx);
/**
+ * Check state of guest to decide whether it makes sense to continue
+ * migration. This is called in each iteration or checkpoint to check
+ * whether all criteria for the migration are still met. If that's not
+ * the case either migration is cancelled via a bad rc or the situation
+ * is handled, e.g. by sending appropriate records.
+ */
+ int (*check_vm_state)(struct xc_sr_context *ctx);
+
+ /**
* Clean up the local environment. Will be called exactly once, either
* after a successful save, or upon encountering an error.
*/
@@ -280,6 +289,9 @@ struct xc_sr_context
/* Read-only mapping of guests shared info page */
shared_info_any_t *shinfo;
+ /* p2m generation count for verifying validity of local p2m. */
+ uint64_t p2m_generation;
+
union
{
struct
diff --git a/tools/libxc/xc_sr_save.c b/tools/libxc/xc_sr_save.c
index cefcef5..88d85ef 100644
--- a/tools/libxc/xc_sr_save.c
+++ b/tools/libxc/xc_sr_save.c
@@ -394,7 +394,8 @@ static int send_dirty_pages(struct xc_sr_context *ctx,
DPRINTF("Bitmap contained more entries than expected...");
xc_report_progress_step(xch, entries, entries);
- return 0;
+
+ return ctx->save.ops.check_vm_state(ctx);
}
/*
@@ -751,6 +752,10 @@ static int save(struct xc_sr_context *ctx, uint16_t guest_type)
if ( rc )
goto err;
+ rc = ctx->save.ops.check_vm_state(ctx);
+ if ( rc )
+ goto err;
+
if ( ctx->save.live )
rc = send_domain_memory_live(ctx);
else if ( ctx->save.checkpointed )
diff --git a/tools/libxc/xc_sr_save_x86_hvm.c b/tools/libxc/xc_sr_save_x86_hvm.c
index f3d6cee..e347b3b 100644
--- a/tools/libxc/xc_sr_save_x86_hvm.c
+++ b/tools/libxc/xc_sr_save_x86_hvm.c
@@ -175,6 +175,12 @@ static int x86_hvm_start_of_checkpoint(struct xc_sr_context *ctx)
return 0;
}
+static int x86_hvm_check_vm_state(struct xc_sr_context *ctx)
+{
+ /* no-op */
+ return 0;
+}
+
static int x86_hvm_end_of_checkpoint(struct xc_sr_context *ctx)
{
int rc;
@@ -221,6 +227,7 @@ struct xc_sr_save_ops save_ops_x86_hvm =
.start_of_stream = x86_hvm_start_of_stream,
.start_of_checkpoint = x86_hvm_start_of_checkpoint,
.end_of_checkpoint = x86_hvm_end_of_checkpoint,
+ .check_vm_state = x86_hvm_check_vm_state,
.cleanup = x86_hvm_cleanup,
};
diff --git a/tools/libxc/xc_sr_save_x86_pv.c b/tools/libxc/xc_sr_save_x86_pv.c
index 5448f32..4deb58f 100644
--- a/tools/libxc/xc_sr_save_x86_pv.c
+++ b/tools/libxc/xc_sr_save_x86_pv.c
@@ -274,6 +274,39 @@ err:
}
/*
+ * Get p2m_generation count.
+ * Returns an error if the generation count has changed since the last call.
+ */
+static int get_p2m_generation(struct xc_sr_context *ctx)
+{
+ uint64_t p2m_generation;
+ int rc;
+
+ p2m_generation = GET_FIELD(ctx->x86_pv.shinfo, arch.p2m_generation,
+ ctx->x86_pv.width);
+
+ rc = (p2m_generation == ctx->x86_pv.p2m_generation) ? 0 : -1;
+ ctx->x86_pv.p2m_generation = p2m_generation;
+
+ return rc;
+}
+
+static int x86_pv_check_vm_state_p2m_list(struct xc_sr_context *ctx)
+{
+ xc_interface *xch = ctx->xch;
+ int rc;
+
+ if ( !ctx->save.live )
+ return 0;
+
+ rc = get_p2m_generation(ctx);
+ if ( rc )
+ ERROR("p2m generation count changed. Migration aborted.");
+
+ return rc;
+}
+
+/*
* Map the guest p2m frames specified via a cr3 value, a virtual address, and
* the maximum pfn. PTE entries are 64 bits for both, 32 and 64 bit guests as
* in 32 bit case we support PAE guests only.
@@ -297,6 +330,8 @@ static int map_p2m_list(struct xc_sr_context *ctx, uint64_t p2m_cr3)
return -1;
}
+ get_p2m_generation(ctx);
+
p2m_vaddr = GET_FIELD(ctx->x86_pv.shinfo, arch.p2m_vaddr,
ctx->x86_pv.width);
fpp = PAGE_SIZE / ctx->x86_pv.width;
@@ -430,6 +465,7 @@ static int map_p2m(struct xc_sr_context *ctx)
{
uint64_t p2m_cr3;
+ ctx->x86_pv.p2m_generation = ~0ULL;
ctx->x86_pv.max_pfn = GET_FIELD(ctx->x86_pv.shinfo, arch.max_pfn,
ctx->x86_pv.width) - 1;
p2m_cr3 = GET_FIELD(ctx->x86_pv.shinfo, arch.p2m_cr3, ctx->x86_pv.width);
@@ -1069,6 +1105,14 @@ static int x86_pv_end_of_checkpoint(struct xc_sr_context *ctx)
return 0;
}
+static int x86_pv_check_vm_state(struct xc_sr_context *ctx)
+{
+ if ( ctx->x86_pv.p2m_generation == ~0ULL )
+ return 0;
+
+ return x86_pv_check_vm_state_p2m_list(ctx);
+}
+
/*
* save_ops function. Cleanup.
*/
@@ -1096,6 +1140,7 @@ struct xc_sr_save_ops save_ops_x86_pv =
.start_of_stream = x86_pv_start_of_stream,
.start_of_checkpoint = x86_pv_start_of_checkpoint,
.end_of_checkpoint = x86_pv_end_of_checkpoint,
+ .check_vm_state = x86_pv_check_vm_state,
.cleanup = x86_pv_cleanup,
};
--
2.6.2
next prev parent reply other threads:[~2016-01-07 12:36 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-01-07 12:36 [PATCH v4 0/4] support linear p2m list in migrate stream v2 Juergen Gross
2016-01-07 12:36 ` [PATCH v4 1/4] libxc: split mapping p2m leaves into a separate function Juergen Gross
2016-01-07 12:36 ` [PATCH v4 2/4] libxc: support of linear p2m list for migration of pv-domains Juergen Gross
2016-01-07 12:36 ` Juergen Gross [this message]
2016-01-07 12:36 ` [PATCH v4 4/4] libxc: set flag for support of linear p2m list in domain builder Juergen Gross
2016-01-07 13:23 ` [PATCH v4 0/4] support linear p2m list in migrate stream v2 Ian Campbell
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1452170214-17821-4-git-send-email-jgross@suse.com \
--to=jgross@suse.com \
--cc=Ian.Campbell@citrix.com \
--cc=andrew.cooper3@citrix.com \
--cc=ian.jackson@eu.citrix.com \
--cc=stefano.stabellini@eu.citrix.com \
--cc=wei.liu2@citrix.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).