From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: "Andrew Cooper" <andrew.cooper3@citrix.com>,
"Wei Liu" <wei.liu2@citrix.com>,
"Jan Beulich" <JBeulich@suse.com>,
"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH 3/6] x86/msr: Clean up the MSR_{PLATFORM_INFO, MISC_FEATURES_ENABLES} constants
Date: Tue, 26 Jun 2018 14:18:15 +0100
Message-ID: <1530019098-7058-4-git-send-email-andrew.cooper3@citrix.com>
In-Reply-To: <1530019098-7058-1-git-send-email-andrew.cooper3@citrix.com>

These MSRs, while Intel-specific, are used to offer virtualised CPUID
faulting support on AMD hardware, so remove the INTEL infix from their
names.

The bit position constants are used by guest_rdmsr(), but the logic can
be expressed using MASK_INSR() instead, which allows those constants to
be removed.
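
For reference, MASK_INSR() is an existing generic helper which inserts a
value at the position of a mask's lowest set bit. The following is only
an illustrative sketch of it (the real definition lives in
xen/include/xen/lib.h), applied to the PLATFORM_INFO case touched below:

    /* Sketch only: shift 'v' up to the lowest set bit of mask 'm'. */
    #define MASK_INSR(v, m) (((v) * ((m) & -(m))) & (m))

    /* For PLATFORM_INFO_CPUID_FAULTING (bit 31): */
    *val = MASK_INSR(dp->plaform_info.cpuid_faulting,
                     PLATFORM_INFO_CPUID_FAULTING);
    /* ... which is equivalent to the previous explicit form: */
    *val = (uint64_t)dp->plaform_info.cpuid_faulting <<
           _MSR_PLATFORM_INFO_CPUID_FAULTING;
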
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
---
xen/arch/x86/cpu/common.c | 16 +++++++---------
xen/arch/x86/domctl.c | 4 ++--
xen/arch/x86/hvm/hvm.c | 4 ++--
xen/arch/x86/msr.c | 30 +++++++++++++++---------------
xen/arch/x86/x86_emulate/x86_emulate.c | 6 +++---
xen/include/asm-x86/msr-index.h | 15 ++++++---------
xen/include/asm-x86/msr.h | 4 ++--
7 files changed, 37 insertions(+), 42 deletions(-)
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 3548b12..a83077f 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -115,19 +115,17 @@ bool __init probe_cpuid_faulting(void)
uint64_t val;
int rc;
- if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
+ if ((rc = rdmsr_safe(MSR_PLATFORM_INFO, val)) == 0)
{
struct msr_domain_policy *dp = &raw_msr_domain_policy;
dp->plaform_info.available = true;
- if (val & MSR_PLATFORM_INFO_CPUID_FAULTING)
+ if (val & PLATFORM_INFO_CPUID_FAULTING)
dp->plaform_info.cpuid_faulting = true;
}
- if (rc ||
- !(val & MSR_PLATFORM_INFO_CPUID_FAULTING) ||
- rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES,
- this_cpu(msr_misc_features)))
+ if (rc || !(val & PLATFORM_INFO_CPUID_FAULTING) ||
+ rdmsr_safe(MSR_MISC_FEATURES_ENABLES, this_cpu(msr_misc_features)))
{
setup_clear_cpu_cap(X86_FEATURE_CPUID_FAULTING);
return false;
@@ -145,12 +143,12 @@ static void set_cpuid_faulting(bool enable)
uint64_t *this_misc_features = &this_cpu(msr_misc_features);
uint64_t val = *this_misc_features;
- if (!!(val & MSR_MISC_FEATURES_CPUID_FAULTING) == enable)
+ if ((val & MISC_FEATURES_CPUID_FAULTING) == enable)
return;
- val ^= MSR_MISC_FEATURES_CPUID_FAULTING;
+ val ^= MISC_FEATURES_CPUID_FAULTING;
- wrmsrl(MSR_INTEL_MISC_FEATURES_ENABLES, val);
+ wrmsrl(MSR_MISC_FEATURES_ENABLES, val);
*this_misc_features = val;
}
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 8fbbf3a..6bbde04 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1248,7 +1248,7 @@ long arch_do_domctl(
struct vcpu *v;
static const uint32_t msrs_to_send[] = {
MSR_SPEC_CTRL,
- MSR_INTEL_MISC_FEATURES_ENABLES,
+ MSR_MISC_FEATURES_ENABLES,
};
uint32_t nr_msrs = ARRAY_SIZE(msrs_to_send);
@@ -1372,7 +1372,7 @@ long arch_do_domctl(
switch ( msr.index )
{
case MSR_SPEC_CTRL:
- case MSR_INTEL_MISC_FEATURES_ENABLES:
+ case MSR_MISC_FEATURES_ENABLES:
if ( guest_wrmsr(v, msr.index, msr.value) != X86EMUL_OKAY )
break;
continue;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4e247d0..5823620 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1335,7 +1335,7 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
#define HVM_CPU_MSR_SIZE(cnt) offsetof(struct hvm_msr, msr[cnt])
static const uint32_t msrs_to_send[] = {
MSR_SPEC_CTRL,
- MSR_INTEL_MISC_FEATURES_ENABLES,
+ MSR_MISC_FEATURES_ENABLES,
};
static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
@@ -1471,7 +1471,7 @@ static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
int rc;
case MSR_SPEC_CTRL:
- case MSR_INTEL_MISC_FEATURES_ENABLES:
+ case MSR_MISC_FEATURES_ENABLES:
rc = guest_wrmsr(v, ctxt->msr[i].index, ctxt->msr[i].val);
if ( rc != X86EMUL_OKAY )
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 1e12ccb..0162890 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -36,7 +36,7 @@ struct msr_vcpu_policy __read_mostly hvm_max_msr_vcpu_policy,
static void __init calculate_raw_policy(void)
{
- /* 0x000000ce MSR_INTEL_PLATFORM_INFO */
+ /* 0x000000ce - MSR_PLATFORM_INFO */
/* Was already added by probe_cpuid_faulting() */
}
@@ -46,7 +46,7 @@ static void __init calculate_host_policy(void)
*dp = raw_msr_domain_policy;
- /* 0x000000ce MSR_INTEL_PLATFORM_INFO */
+ /* 0x000000ce - MSR_PLATFORM_INFO */
/* probe_cpuid_faulting() sanity checks presence of MISC_FEATURES_ENABLES */
dp->plaform_info.cpuid_faulting = cpu_has_cpuid_faulting;
}
@@ -61,7 +61,7 @@ static void __init calculate_hvm_max_policy(void)
*dp = host_msr_domain_policy;
- /* 0x000000ce MSR_INTEL_PLATFORM_INFO */
+ /* 0x000000ce - MSR_PLATFORM_INFO */
/* It's always possible to emulate CPUID faulting for HVM guests */
if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
@@ -70,7 +70,7 @@ static void __init calculate_hvm_max_policy(void)
dp->plaform_info.cpuid_faulting = true;
}
- /* 0x00000140 MSR_INTEL_MISC_FEATURES_ENABLES */
+ /* 0x00000140 - MSR_MISC_FEATURES_ENABLES */
vp->misc_features_enables.available = dp->plaform_info.cpuid_faulting;
}
@@ -81,7 +81,7 @@ static void __init calculate_pv_max_policy(void)
*dp = host_msr_domain_policy;
- /* 0x00000140 MSR_INTEL_MISC_FEATURES_ENABLES */
+ /* 0x00000140 - MSR_MISC_FEATURES_ENABLES */
vp->misc_features_enables.available = dp->plaform_info.cpuid_faulting;
}
@@ -159,22 +159,22 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
*val = vp->spec_ctrl.raw;
break;
- case MSR_INTEL_PLATFORM_INFO:
+ case MSR_PLATFORM_INFO:
if ( !dp->plaform_info.available )
goto gp_fault;
- *val = (uint64_t)dp->plaform_info.cpuid_faulting <<
- _MSR_PLATFORM_INFO_CPUID_FAULTING;
+ *val = MASK_INSR(dp->plaform_info.cpuid_faulting,
+ PLATFORM_INFO_CPUID_FAULTING);
break;
case MSR_ARCH_CAPABILITIES:
/* Not implemented yet. */
goto gp_fault;
- case MSR_INTEL_MISC_FEATURES_ENABLES:
+ case MSR_MISC_FEATURES_ENABLES:
if ( !vp->misc_features_enables.available )
goto gp_fault;
- *val = (uint64_t)vp->misc_features_enables.cpuid_faulting <<
- _MSR_MISC_FEATURES_CPUID_FAULTING;
+ *val = MASK_INSR(vp->misc_features_enables.cpuid_faulting,
+ MISC_FEATURES_CPUID_FAULTING);
break;
default:
@@ -199,7 +199,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
{
uint64_t rsvd;
- case MSR_INTEL_PLATFORM_INFO:
+ case MSR_PLATFORM_INFO:
case MSR_ARCH_CAPABILITIES:
/* Read-only */
goto gp_fault;
@@ -254,7 +254,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
wrmsrl(MSR_PRED_CMD, val);
break;
- case MSR_INTEL_MISC_FEATURES_ENABLES:
+ case MSR_MISC_FEATURES_ENABLES:
{
bool old_cpuid_faulting = vp->misc_features_enables.cpuid_faulting;
@@ -263,13 +263,13 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
rsvd = ~0ull;
if ( dp->plaform_info.cpuid_faulting )
- rsvd &= ~MSR_MISC_FEATURES_CPUID_FAULTING;
+ rsvd &= ~MISC_FEATURES_CPUID_FAULTING;
if ( val & rsvd )
goto gp_fault;
vp->misc_features_enables.cpuid_faulting =
- val & MSR_MISC_FEATURES_CPUID_FAULTING;
+ val & MISC_FEATURES_CPUID_FAULTING;
if ( v == curr && is_hvm_domain(d) && cpu_has_cpuid_faulting &&
(old_cpuid_faulting ^ vp->misc_features_enables.cpuid_faulting) )
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index e372c4b..1a84c90 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -6525,9 +6525,9 @@ x86_emulate(
msr_val = 0;
fail_if(ops->cpuid == NULL);
- /* Speculatively read MSR_INTEL_MISC_FEATURES_ENABLES. */
+ /* Speculatively read MSR_MISC_FEATURES_ENABLES. */
if ( ops->read_msr && !mode_ring0() &&
- (rc = ops->read_msr(MSR_INTEL_MISC_FEATURES_ENABLES,
+ (rc = ops->read_msr(MSR_MISC_FEATURES_ENABLES,
&msr_val, ctxt)) == X86EMUL_EXCEPTION )
{
/* Not implemented. Squash the exception and proceed normally. */
@@ -6537,7 +6537,7 @@ x86_emulate(
if ( rc != X86EMUL_OKAY )
goto done;
- generate_exception_if((msr_val & MSR_MISC_FEATURES_CPUID_FAULTING),
+ generate_exception_if(msr_val & MISC_FEATURES_CPUID_FAULTING,
EXC_GP, 0); /* Faulting active? (Inc. CPL test) */
rc = ops->cpuid(_regs.eax, _regs.ecx, &cpuid_leaf, ctxt);
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 2c9b75f..48d80e9 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -24,12 +24,18 @@
#define MSR_PRED_CMD 0x00000049
#define PRED_CMD_IBPB (_AC(1, ULL) << 0)
+#define MSR_PLATFORM_INFO 0x000000ce
+#define PLATFORM_INFO_CPUID_FAULTING (_AC(1, ULL) << 31)
+
#define MSR_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAPS_RDCL_NO (_AC(1, ULL) << 0)
#define ARCH_CAPS_IBRS_ALL (_AC(1, ULL) << 1)
#define ARCH_CAPS_RSBA (_AC(1, ULL) << 2)
#define ARCH_CAPS_SSB_NO (_AC(1, ULL) << 4)
+#define MSR_MISC_FEATURES_ENABLES 0x00000140
+#define MISC_FEATURES_CPUID_FAULTING (_AC(1, ULL) << 0)
+
#define MSR_EFER 0xc0000080 /* Extended Feature Enable Register */
#define EFER_SCE (_AC(1, ULL) << 0) /* SYSCALL Enable */
#define EFER_LME (_AC(1, ULL) << 8) /* Long Mode Enable */
@@ -534,15 +540,6 @@
#define MSR_INTEL_MASK_V3_CPUID80000001 0x00000133
#define MSR_INTEL_MASK_V3_CPUIDD_01 0x00000134
-/* Intel cpuid faulting MSRs */
-#define MSR_INTEL_PLATFORM_INFO 0x000000ce
-#define _MSR_PLATFORM_INFO_CPUID_FAULTING 31
-#define MSR_PLATFORM_INFO_CPUID_FAULTING (1ULL << _MSR_PLATFORM_INFO_CPUID_FAULTING)
-
-#define MSR_INTEL_MISC_FEATURES_ENABLES 0x00000140
-#define _MSR_MISC_FEATURES_CPUID_FAULTING 0
-#define MSR_MISC_FEATURES_CPUID_FAULTING (1ULL << _MSR_MISC_FEATURES_CPUID_FAULTING)
-
#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index afbeb7f..8f9f964 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -260,7 +260,7 @@ static inline void wrmsr_tsc_aux(uint32_t val)
/* MSR policy object for shared per-domain MSRs */
struct msr_domain_policy
{
- /* 0x000000ce MSR_INTEL_PLATFORM_INFO */
+ /* 0x000000ce - MSR_PLATFORM_INFO */
struct {
bool available; /* This MSR is non-architectural */
bool cpuid_faulting;
@@ -288,7 +288,7 @@ struct msr_vcpu_policy
uint32_t raw;
} spec_ctrl;
- /* 0x00000140 MSR_INTEL_MISC_FEATURES_ENABLES */
+ /* 0x00000140 - MSR_MISC_FEATURES_ENABLES */
struct {
bool available; /* This MSR is non-architectural */
bool cpuid_faulting;
--
2.1.4