From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Julien Grall <julien.grall@arm.com>,
Wei Liu <wei.liu2@citrix.com>, Jan Beulich <JBeulich@suse.com>
Subject: [PATCH for-4.10] x86: Avoid corruption on migrate for vcpus using CPUID Faulting
Date: Sat, 25 Nov 2017 18:15:28 +0000 [thread overview]
Message-ID: <1511633728-680-1-git-send-email-andrew.cooper3@citrix.com> (raw)
Xen 4.8 and later virtualises CPUID Faulting support for guests. However, the
value of MSR_MISC_FEATURES_ENABLES is omitted from the vcpu state, meaning
that the current cpuid faulting setting is lost on migrate/suspend/resume.
To move this MSR, use the new guest_{rd,wr}msr() infrastructure. This avoids
duplicating or open-coding the feature check and value logic, as well as
abstracting away the internal value representation. One small adjustment to
guest_wrmsr() is required to cope with being called in toolstack context.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>
CC: Julien Grall <julien.grall@arm.com>
This needs backporting to 4.8 and later, and therefore should be considered
for 4.10 at this point.
---
xen/arch/x86/domctl.c | 46 +++++++++++++++++++++++++++++++++++++++++++++-
xen/arch/x86/hvm/hvm.c | 40 +++++++++++++++++++++++++++++++++++++++-
xen/arch/x86/msr.c | 3 ++-
xen/include/asm-x86/msr.h | 3 +++
4 files changed, 89 insertions(+), 3 deletions(-)
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 80b4df9..e98c4a3 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1286,7 +1286,7 @@ long arch_do_domctl(
struct xen_domctl_vcpu_msrs *vmsrs = &domctl->u.vcpu_msrs;
struct xen_domctl_vcpu_msr msr;
struct vcpu *v;
- uint32_t nr_msrs = 0;
+ uint32_t nr_msrs = 1;
ret = -ESRCH;
if ( (vmsrs->vcpu >= d->max_vcpus) ||
@@ -1311,10 +1311,49 @@ long arch_do_domctl(
vmsrs->msr_count = nr_msrs;
else
{
+ static const uint32_t msrs[] = {
+ MSR_INTEL_MISC_FEATURES_ENABLES,
+ };
+ unsigned int j;
+
i = 0;
vcpu_pause(v);
+ for ( j = 0; j < ARRAY_SIZE(msrs); ++j )
+ {
+ uint64_t val;
+ int rc = guest_rdmsr(v, msrs[j], &val);
+
+ /*
+ * It is the programmer's responsibility to ensure that
+ * msrs[] contains generally-readable MSRs.
+ * X86EMUL_EXCEPTION here implies a missing feature.
+ */
+ if ( rc == X86EMUL_EXCEPTION )
+ continue;
+
+ if ( rc != X86EMUL_OKAY )
+ {
+ ASSERT_UNREACHABLE();
+ ret = -ENXIO;
+ break;
+ }
+
+ if ( !val )
+ continue; /* Skip empty MSRs. */
+
+ if ( i < vmsrs->msr_count && !ret )
+ {
+ msr.index = msrs[j];
+ msr.reserved = 0;
+ msr.value = val;
+ if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
+ ret = -EFAULT;
+ }
+ ++i;
+ }
+
if ( boot_cpu_has(X86_FEATURE_DBEXT) )
{
unsigned int j;
@@ -1375,6 +1414,11 @@ long arch_do_domctl(
switch ( msr.index )
{
+ case MSR_INTEL_MISC_FEATURES_ENABLES:
+ if ( guest_wrmsr(v, msr.index, msr.value) != X86EMUL_OKAY )
+ break;
+ continue;
+
case MSR_AMD64_DR0_ADDRESS_MASK:
if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
(msr.value >> 32) )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c765a5e..7f18f3b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1322,11 +1322,14 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
}
#define HVM_CPU_MSR_SIZE(cnt) offsetof(struct hvm_msr, msr[cnt])
-static unsigned int __read_mostly msr_count_max;
+static unsigned int __read_mostly msr_count_max = 1;
static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
{
struct vcpu *v;
+ static const uint32_t msrs[] = {
+ MSR_INTEL_MISC_FEATURES_ENABLES,
+ };
for_each_vcpu ( d, v )
{
@@ -1340,6 +1343,32 @@ static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
ctxt = (struct hvm_msr *)&h->data[h->cur];
ctxt->count = 0;
+ for ( i = 0; i < ARRAY_SIZE(msrs); ++i )
+ {
+ uint64_t val;
+ int rc = guest_rdmsr(v, msrs[i], &val);
+
+ /*
+ * It is the programmer's responsibility to ensure that msrs[]
+ * contains generally-readable MSRs. X86EMUL_EXCEPTION here
+ * implies a missing feature.
+ */
+ if ( rc == X86EMUL_EXCEPTION )
+ continue;
+
+ if ( rc != X86EMUL_OKAY )
+ {
+ ASSERT_UNREACHABLE();
+ return -ENXIO;
+ }
+
+ if ( !val )
+ continue; /* Skip empty MSRs. */
+
+ ctxt->msr[ctxt->count].index = msrs[i];
+ ctxt->msr[ctxt->count++].val = val;
+ }
+
if ( hvm_funcs.save_msr )
hvm_funcs.save_msr(v, ctxt);
@@ -1426,6 +1455,15 @@ static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
{
switch ( ctxt->msr[i].index )
{
+ int rc;
+
+ case MSR_INTEL_MISC_FEATURES_ENABLES:
+ rc = guest_wrmsr(v, ctxt->msr[i].index, ctxt->msr[i].val);
+
+ if ( rc != X86EMUL_OKAY )
+ err = -ENXIO;
+ break;
+
default:
if ( !ctxt->msr[i]._rsvd )
err = -ENXIO;
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index baba44f..31983ed 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -150,6 +150,7 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
{
+ const struct vcpu *curr = current;
struct domain *d = v->domain;
struct msr_domain_policy *dp = d->arch.msr;
struct msr_vcpu_policy *vp = v->arch.msr;
@@ -176,7 +177,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
vp->misc_features_enables.cpuid_faulting =
val & MSR_MISC_FEATURES_CPUID_FAULTING;
- if ( is_hvm_domain(d) && cpu_has_cpuid_faulting &&
+ if ( v == curr && is_hvm_domain(d) && cpu_has_cpuid_faulting &&
(old_cpuid_faulting ^ vp->misc_features_enables.cpuid_faulting) )
ctxt_switch_levelling(v);
break;
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 751fa25..41732a4 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -231,6 +231,9 @@ int init_vcpu_msr_policy(struct vcpu *v);
* not (yet) handled by it and must be processed by legacy handlers. Such
* behaviour is needed for transition period until all rd/wrmsr are handled
* by the new MSR infrastructure.
+ *
+ * These functions are also used by the migration logic, so need to cope with
+ * being used outside of v's context.
*/
int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val);
int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val);
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
next reply other threads:[~2017-11-25 18:15 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-11-25 18:15 Andrew Cooper [this message]
2017-11-27 9:53 ` [PATCH for-4.10] x86: Avoid corruption on migrate for vcpus using CPUID Faulting Jan Beulich
2017-11-27 11:37 ` Andrew Cooper
2017-11-27 13:19 ` Jan Beulich
2017-11-27 13:02 ` [PATCH v2 " Andrew Cooper
2017-11-27 14:41 ` Jan Beulich
2017-11-30 18:54 ` Andrew Cooper
2017-12-01 11:21 ` Jan Beulich
2017-12-01 16:15 ` Julien Grall
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1511633728-680-1-git-send-email-andrew.cooper3@citrix.com \
--to=andrew.cooper3@citrix.com \
--cc=JBeulich@suse.com \
--cc=julien.grall@arm.com \
--cc=wei.liu2@citrix.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).