From: Wen Congyang <wency@cn.fujitsu.com>
To: xen devel <xen-devel@lists.xen.org>
Cc: Ian Campbell <Ian.Campbell@citrix.com>,
Wen Congyang <wency@cn.fujitsu.com>,
Ian Jackson <Ian.Jackson@eu.citrix.com>,
Jiang Yunhong <yunhong.jiang@intel.com>,
Dong Eddie <eddie.dong@intel.com>,
Yang Hongyang <yanghy@cn.fujitsu.com>,
Lai Jiangshan <laijs@cn.fujitsu.com>
Subject: [RFC Patch 25/25] sync mmu before resuming secondary vm
Date: Fri, 18 Jul 2014 19:39:10 +0800 [thread overview]
Message-ID: <1405683551-12579-26-git-send-email-wency@cn.fujitsu.com> (raw)
In-Reply-To: <1405683551-12579-1-git-send-email-wency@cn.fujitsu.com>
In our test, we found that the secondary vm bluescreens due to a memory-related
problem. If we sync the mmu, the problem disappears.
TODO: only vmx+ept is done.
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
---
tools/libxc/xc_domain.c | 9 +++++++++
tools/libxc/xenctrl.h | 2 ++
tools/libxl/libxl_colo_restore.c | 6 +++++-
xen/arch/x86/domctl.c | 15 +++++++++++++++
xen/arch/x86/hvm/save.c | 6 ++++++
xen/arch/x86/hvm/vmx/vmcs.c | 8 ++++++++
xen/arch/x86/hvm/vmx/vmx.c | 1 +
xen/include/asm-x86/hvm/hvm.h | 1 +
xen/include/asm-x86/hvm/vmx/vmcs.h | 1 +
xen/include/public/domctl.h | 1 +
xen/include/xen/hvm/save.h | 2 ++
11 files changed, 51 insertions(+), 1 deletion(-)
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 0230c6c..0b47bdd 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -2123,6 +2123,15 @@ int xc_domain_set_max_evtchn(xc_interface *xch, uint32_t domid,
return do_domctl(xch, &domctl);
}
+int xc_domain_hvm_sync_mmu(xc_interface *xch, uint32_t domid)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_hvm_sync_mmu;
+ domctl.domain = domid;
+ return do_domctl(xch, &domctl);
+}
+
/*
* Local variables:
* mode: C
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 3578b09..a83364a 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -961,6 +961,8 @@ int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);
int xc_domain_set_max_evtchn(xc_interface *xch, uint32_t domid,
uint32_t max_port);
+int xc_domain_hvm_sync_mmu(xc_interface *xch, uint32_t domid);
+
/*
* CPUPOOL MANAGEMENT FUNCTIONS
*/
diff --git a/tools/libxl/libxl_colo_restore.c b/tools/libxl/libxl_colo_restore.c
index aea3feb..730b492 100644
--- a/tools/libxl/libxl_colo_restore.c
+++ b/tools/libxl/libxl_colo_restore.c
@@ -124,11 +124,15 @@ static void colo_resume_vm(libxl__egc *egc,
STATE_AO_GC(crs->ao);
if (!crs->saved_cb) {
- /* TODO: sync mmu for hvm? */
+ rc = xc_domain_hvm_sync_mmu(CTX->xch, crs->domid);
+ if (rc)
+ goto fail;
+
rc = libxl__domain_resume(gc, crs->domid, 0, 1);
if (rc)
LOG(ERROR, "cannot resume secondary vm");
+fail:
crcs->callback(egc, crcs, rc);
return;
}
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index d62c715..d0dfad7 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1395,6 +1395,21 @@ long arch_do_domctl(
}
break;
+ case XEN_DOMCTL_hvm_sync_mmu:
+ {
+ struct domain *d;
+
+ ret = -ESRCH;
+ d = rcu_lock_domain_by_id(domctl->domain);
+ if ( d != NULL )
+ {
+ arch_hvm_sync_mmu(d);
+ rcu_unlock_domain(d);
+ ret = 0;
+ }
+ }
+ break;
+
default:
ret = iommu_do_domctl(domctl, d, u_domctl);
break;
diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c
index 6af19be..7a07ebf 100644
--- a/xen/arch/x86/hvm/save.c
+++ b/xen/arch/x86/hvm/save.c
@@ -79,6 +79,12 @@ int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
return 0;
}
+void arch_hvm_sync_mmu(struct domain *d)
+{
+ if (hvm_funcs.sync_mmu)
+ hvm_funcs.sync_mmu(d);
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 8ffc562..4be9b4d 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -596,6 +596,14 @@ void vmx_cpu_down(void)
local_irq_restore(flags);
}
+void vmx_sync_mmu(struct domain *d)
+{
+ ept_sync_domain(p2m_get_hostp2m(d));
+
+ /* flush tlb */
+ flush_all(FLUSH_TLB_GLOBAL);
+}
+
struct foreign_vmcs {
struct vcpu *v;
unsigned int count;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index eb73412..b46b4dd 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1719,6 +1719,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
.event_pending = vmx_event_pending,
.cpu_up = vmx_cpu_up,
.cpu_down = vmx_cpu_down,
+ .sync_mmu = vmx_sync_mmu,
.cpuid_intercept = vmx_cpuid_intercept,
.wbinvd_intercept = vmx_wbinvd_intercept,
.fpu_dirty_intercept = vmx_fpu_dirty_intercept,
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0ebd478..b4f89a7 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -151,6 +151,7 @@ struct hvm_function_table {
int (*cpu_up)(void);
void (*cpu_down)(void);
+ void (*sync_mmu)(struct domain *d);
/* Copy up to 15 bytes from cached instruction bytes at current rIP. */
unsigned int (*get_insn_bytes)(struct vcpu *v, uint8_t *buf);
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 215d93c..664741a 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -29,6 +29,7 @@ extern int vmx_cpu_up_prepare(unsigned int cpu);
extern void vmx_cpu_dead(unsigned int cpu);
extern int vmx_cpu_up(void);
extern void vmx_cpu_down(void);
+extern void vmx_sync_mmu(struct domain *d);
extern void vmx_save_host_msrs(void);
struct vmcs_struct {
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 5b11bbf..11e5a26 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -1008,6 +1008,7 @@ struct xen_domctl {
#define XEN_DOMCTL_cacheflush 71
#define XEN_DOMCTL_get_vcpu_msrs 72
#define XEN_DOMCTL_set_vcpu_msrs 73
+#define XEN_DOMCTL_hvm_sync_mmu 74
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
diff --git a/xen/include/xen/hvm/save.h b/xen/include/xen/hvm/save.h
index ae6f0bb..049fdb8 100644
--- a/xen/include/xen/hvm/save.h
+++ b/xen/include/xen/hvm/save.h
@@ -135,4 +135,6 @@ struct hvm_save_header;
void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr);
int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr);
+void arch_hvm_sync_mmu(struct domain *d);
+
#endif /* __XEN_HVM_SAVE_H__ */
--
1.9.3
next prev parent reply other threads:[~2014-07-18 11:39 UTC|newest]
Thread overview: 36+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-07-18 11:38 [RFC Patch 00/25] COarse-grain LOck-stepping Virtual Machines for Non-stop Service Wen Congyang
2014-07-18 11:38 ` [RFC Patch 01/25] copy the correct page to memory Wen Congyang
2014-07-18 11:38 ` [RFC Patch 02/25] csum the correct page Wen Congyang
2014-07-18 11:38 ` [RFC Patch 03/25] don't zero out ioreq page Wen Congyang
2014-07-18 11:38 ` [RFC Patch 04/25] don't touch remus in remus_device Wen Congyang
2014-07-18 11:38 ` [RFC Patch 05/25] rename remus device to checkpoint device Wen Congyang
2014-07-18 11:38 ` [RFC Patch 06/25] adjust the indentation Wen Congyang
2014-07-18 11:38 ` [RFC Patch 07/25] Refactor domain_suspend_callback_common() Wen Congyang
2014-07-18 11:38 ` [RFC Patch 08/25] Update libxl__domain_resume() for colo Wen Congyang
2014-07-18 11:38 ` [RFC Patch 09/25] Update libxl__domain_suspend_common_switch_qemu_logdirty() " Wen Congyang
2014-07-18 11:38 ` [RFC Patch 10/25] Introduce a new internal API libxl__domain_unpause() Wen Congyang
2014-07-18 11:38 ` [RFC Patch 11/25] Update libxl__domain_unpause() to support qemu-xen Wen Congyang
2014-07-18 11:38 ` [RFC Patch 12/25] support to resume uncooperative HVM guests Wen Congyang
2014-07-18 11:38 ` [RFC Patch 13/25] update datecopier to support sending data only Wen Congyang
2014-07-18 11:38 ` [RFC Patch 14/25] introduce a new API to aync read data from fd Wen Congyang
2014-07-18 11:39 ` [RFC Patch 15/25] Update libxl_save_msgs_gen.pl to support return data from xl to xc Wen Congyang
2014-07-18 11:39 ` [RFC Patch 16/25] Allow slave sends data to master Wen Congyang
2014-07-18 11:39 ` [RFC Patch 17/25] secondary vm suspend/resume/checkpoint code Wen Congyang
2014-07-18 11:39 ` [RFC Patch 18/25] primary vm suspend/get_dirty_pfn/resume/checkpoint code Wen Congyang
2014-07-18 11:39 ` [RFC Patch 19/25] xc_domain_save: flush cache before calling callbacks->postcopy() in colo mode Wen Congyang
2014-07-18 11:39 ` [RFC Patch 20/25] COLO: xc related codes Wen Congyang
2014-07-18 11:39 ` [RFC Patch 21/25] send store mfn and console mfn to xl before resuming secondary vm Wen Congyang
2014-07-18 11:39 ` [RFC Patch 22/25] implement the cmdline for COLO Wen Congyang
2014-07-18 11:39 ` [RFC Patch 23/25] HACK: do checkpoint per 20ms Wen Congyang
2014-07-18 11:39 ` [RFC Patch 24/25] fix vm entry fail Wen Congyang
2014-07-24 10:40 ` Tim Deegan
2014-07-25 5:39 ` Wen Congyang
2014-08-07 6:52 ` Wen Congyang
2014-07-18 11:39 ` Wen Congyang [this message]
2014-07-24 10:59 ` [RFC Patch 25/25] sync mmu before resuming secondary vm Tim Deegan
2014-07-25 5:46 ` Wen Congyang
2014-08-07 7:46 ` Wen Congyang
2014-07-18 11:39 ` [RFC Patch 26/25] Introduce "xen-load-devices-state" Wen Congyang
2014-07-18 11:43 ` [RFC Patch 00/25] COarse-grain LOck-stepping Virtual Machines for Non-stop Service Wen Congyang
2014-07-18 14:18 ` Andrew Cooper
2014-07-18 14:30 ` Wen Congyang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1405683551-12579-26-git-send-email-wency@cn.fujitsu.com \
--to=wency@cn.fujitsu.com \
--cc=Ian.Campbell@citrix.com \
--cc=Ian.Jackson@eu.citrix.com \
--cc=eddie.dong@intel.com \
--cc=laijs@cn.fujitsu.com \
--cc=xen-devel@lists.xen.org \
--cc=yanghy@cn.fujitsu.com \
--cc=yunhong.jiang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).