From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Jan Beulich <JBeulich@suse.com>
Subject: [PATCH 1/2] xen/x86: Helpers for cpu feature manipulation
Date: Mon, 26 Oct 2015 11:17:41 +0000
Message-ID: <1445858262-17965-2-git-send-email-andrew.cooper3@citrix.com>
In-Reply-To: <1445858262-17965-1-git-send-email-andrew.cooper3@citrix.com>
Introduce cpufeat_word() and cpufeat_bit(), and redefine cpufeat_mask() using _AC() so all three are usable from assembly code; replace the open-coded versions throughout.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
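
For reviewers, a minimal standalone sketch (not part of the patch) of how the
new helpers decompose a feature index, and how they replace the previous
open-coded forms such as "1<<19" and "X86_FEATURE_NX & 0x1f".  The main()
wrapper and printf output are purely illustrative; the macro bodies and the
two feature values mirror what cpufeature.h defines.

    /* Illustrative only: decomposing a feature index into word/bit/mask. */
    #include <stdio.h>

    #define cpufeat_word(idx) ((idx) / 32)
    #define cpufeat_bit(idx)  ((idx) % 32)
    #define cpufeat_mask(idx) (1U << cpufeat_bit(idx))

    #define X86_FEATURE_CLFLSH (0 * 32 + 19)   /* word 0, bit 19 */
    #define X86_FEATURE_NX     (1 * 32 + 20)   /* word 1, bit 20 */

    int main(void)
    {
        /* Equivalent to the old open-coded "cap0 & (1<<19)" test. */
        printf("CLFLSH: word %d, mask %#x\n",
               cpufeat_word(X86_FEATURE_CLFLSH),
               cpufeat_mask(X86_FEATURE_CLFLSH));

        /* Equivalent to the old "X86_FEATURE_NX & 0x1f" bit operand in asm. */
        printf("NX:     word %d, bit %d\n",
               cpufeat_word(X86_FEATURE_NX),
               cpufeat_bit(X86_FEATURE_NX));

        return 0;
    }

Note also that the new CPUINFO_FEATURE_OFFSET() expression,
CPUINFO_features + cpufeat_word(feature) * 4, is arithmetically identical to
the previous ((feature >> 3) & ~3) + CPUINFO_features form.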
---
xen/arch/x86/boot/head.S | 2 +-
xen/arch/x86/boot/trampoline.S | 2 +-
xen/arch/x86/cpu/common.c | 4 +-
xen/arch/x86/efi/efi-boot.h | 2 +-
xen/arch/x86/hvm/hvm.c | 10 ++---
xen/arch/x86/traps.c | 2 +-
xen/arch/x86/xstate.c | 4 +-
xen/include/asm-x86/amd.h | 89 +++++++++++++++++++---------------------
xen/include/asm-x86/asm_defns.h | 2 +-
xen/include/asm-x86/cpufeature.h | 7 +++-
10 files changed, 63 insertions(+), 61 deletions(-)
diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
index f63b349..ac4962b 100644
--- a/xen/arch/x86/boot/head.S
+++ b/xen/arch/x86/boot/head.S
@@ -142,7 +142,7 @@ __start:
mov %edx,sym_phys(boot_cpu_data)+CPUINFO_FEATURE_OFFSET(X86_FEATURE_LM)
/* Check for availability of long mode. */
- bt $X86_FEATURE_LM & 0x1f,%edx
+ bt $cpufeat_bit(X86_FEATURE_LM),%edx
jnc bad_cpu
/* Stash TSC to calculate a good approximation of time-since-boot */
diff --git a/xen/arch/x86/boot/trampoline.S b/xen/arch/x86/boot/trampoline.S
index 8b0d9c1..c8f32cd 100644
--- a/xen/arch/x86/boot/trampoline.S
+++ b/xen/arch/x86/boot/trampoline.S
@@ -89,7 +89,7 @@ trampoline_protmode_entry:
movl $MSR_EFER,%ecx
rdmsr
or $EFER_LME|EFER_SCE,%eax /* Long Mode + SYSCALL/SYSRET */
- bt $X86_FEATURE_NX % 32,%edi /* No Execute? */
+ bt $cpufeat_bit(X86_FEATURE_NX),%edi /* No Execute? */
jnc 1f
btsl $_EFER_NX,%eax /* No Execute */
1: wrmsr
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index f256444..a5caa84 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -202,7 +202,7 @@ static void __init early_cpu_detect(void)
c->x86_mask = tfms & 15;
cap0 &= ~cleared_caps[0];
cap4 &= ~cleared_caps[4];
- if (cap0 & (1<<19))
+ if (cap0 & cpufeat_mask(X86_FEATURE_CLFLSH))
c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
/* Leaf 0x1 capabilities filled in early for Xen. */
c->x86_capability[0] = cap0;
@@ -263,7 +263,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
if ( c->cpuid_level >= 0x00000007 ) {
u32 dummy;
cpuid_count(0x00000007, 0, &dummy, &ebx, &dummy, &dummy);
- c->x86_capability[X86_FEATURE_FSGSBASE / 32] = ebx;
+ c->x86_capability[cpufeat_word(X86_FEATURE_FSGSBASE)] = ebx;
}
}
diff --git a/xen/arch/x86/efi/efi-boot.h b/xen/arch/x86/efi/efi-boot.h
index 4c7f383..d8ca862 100644
--- a/xen/arch/x86/efi/efi-boot.h
+++ b/xen/arch/x86/efi/efi-boot.h
@@ -225,7 +225,7 @@ static void __init noreturn efi_arch_post_exit_boot(void)
asm volatile("pushq $0\n\tpopfq");
rdmsrl(MSR_EFER, efer);
efer |= EFER_SCE;
- if ( cpuid_ext_features & (1 << (X86_FEATURE_NX & 0x1f)) )
+ if ( cpuid_ext_features & cpufeat_mask(X86_FEATURE_NX) )
efer |= EFER_NX;
wrmsrl(MSR_EFER, efer);
write_cr0(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP |
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 3fa2280..965bfbf 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1850,8 +1850,8 @@ static const char * hvm_efer_valid(const struct vcpu *v, uint64_t value,
}
else
{
- ext1_edx = boot_cpu_data.x86_capability[X86_FEATURE_LM / 32];
- ext1_ecx = boot_cpu_data.x86_capability[X86_FEATURE_SVM / 32];
+ ext1_edx = boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_LM)];
+ ext1_ecx = boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_SVM)];
}
/*
@@ -1917,9 +1917,9 @@ static unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v,
}
else
{
- leaf1_edx = boot_cpu_data.x86_capability[X86_FEATURE_VME / 32];
- leaf1_ecx = boot_cpu_data.x86_capability[X86_FEATURE_PCID / 32];
- leaf7_0_ebx = boot_cpu_data.x86_capability[X86_FEATURE_FSGSBASE / 32];
+ leaf1_edx = boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_VME)];
+ leaf1_ecx = boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_PCID)];
+ leaf7_0_ebx = boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_FSGSBASE)];
}
return ~(unsigned long)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 8093535..b32f696 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -935,7 +935,7 @@ void pv_cpuid(struct cpu_user_regs *regs)
goto unsupported;
if ( regs->_ecx == 1 )
{
- a &= boot_cpu_data.x86_capability[X86_FEATURE_XSAVEOPT / 32];
+ a &= boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_XSAVEOPT)];
if ( !cpu_has_xsaves )
b = c = d = 0;
}
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 9ddff90..c6238d9 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -326,9 +326,9 @@ void xstate_init(struct cpuinfo_x86 *c)
eax &= (cpufeat_mask(X86_FEATURE_XSAVEOPT) |
cpufeat_mask(X86_FEATURE_XSAVEC));
- c->x86_capability[X86_FEATURE_XSAVEOPT / 32] = eax;
+ c->x86_capability[cpufeat_word(X86_FEATURE_XSAVEOPT)] = eax;
- BUG_ON(eax != boot_cpu_data.x86_capability[X86_FEATURE_XSAVEOPT / 32]);
+ BUG_ON(eax != boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_XSAVEOPT)]);
}
static bool_t valid_xcr0(u64 xcr0)
diff --git a/xen/include/asm-x86/amd.h b/xen/include/asm-x86/amd.h
index 87b6e3e..bb23bea 100644
--- a/xen/include/asm-x86/amd.h
+++ b/xen/include/asm-x86/amd.h
@@ -9,95 +9,92 @@
/* CPUID masked for use by AMD-V Extended Migration */
-#define X86_FEATURE_BITPOS(_feature_) ((_feature_) % 32)
-#define __bit(_x_) (1U << X86_FEATURE_BITPOS(_x_))
-
/* Family 0Fh, Revision C */
#define AMD_FEATURES_K8_REV_C_ECX 0
-#define AMD_FEATURES_K8_REV_C_EDX ( \
- __bit(X86_FEATURE_FPU) | __bit(X86_FEATURE_VME) | \
- __bit(X86_FEATURE_DE) | __bit(X86_FEATURE_PSE) | \
- __bit(X86_FEATURE_TSC) | __bit(X86_FEATURE_MSR) | \
- __bit(X86_FEATURE_PAE) | __bit(X86_FEATURE_MCE) | \
- __bit(X86_FEATURE_CX8) | __bit(X86_FEATURE_APIC) | \
- __bit(X86_FEATURE_SEP) | __bit(X86_FEATURE_MTRR) | \
- __bit(X86_FEATURE_PGE) | __bit(X86_FEATURE_MCA) | \
- __bit(X86_FEATURE_CMOV) | __bit(X86_FEATURE_PAT) | \
- __bit(X86_FEATURE_PSE36) | __bit(X86_FEATURE_CLFLSH)| \
- __bit(X86_FEATURE_MMX) | __bit(X86_FEATURE_FXSR) | \
- __bit(X86_FEATURE_XMM) | __bit(X86_FEATURE_XMM2))
-#define AMD_EXTFEATURES_K8_REV_C_ECX 0
-#define AMD_EXTFEATURES_K8_REV_C_EDX ( \
- __bit(X86_FEATURE_FPU) | __bit(X86_FEATURE_VME) | \
- __bit(X86_FEATURE_DE) | __bit(X86_FEATURE_PSE) | \
- __bit(X86_FEATURE_TSC) | __bit(X86_FEATURE_MSR) | \
- __bit(X86_FEATURE_PAE) | __bit(X86_FEATURE_MCE) | \
- __bit(X86_FEATURE_CX8) | __bit(X86_FEATURE_APIC) | \
- __bit(X86_FEATURE_SYSCALL) | __bit(X86_FEATURE_MTRR) | \
- __bit(X86_FEATURE_PGE) | __bit(X86_FEATURE_MCA) | \
- __bit(X86_FEATURE_CMOV) | __bit(X86_FEATURE_PAT) | \
- __bit(X86_FEATURE_PSE36) | __bit(X86_FEATURE_NX) | \
- __bit(X86_FEATURE_MMXEXT) | __bit(X86_FEATURE_MMX) | \
- __bit(X86_FEATURE_FXSR) | __bit(X86_FEATURE_LM) | \
- __bit(X86_FEATURE_3DNOWEXT) | __bit(X86_FEATURE_3DNOW))
+#define AMD_FEATURES_K8_REV_C_EDX ( \
+ cpufeat_mask(X86_FEATURE_FPU) | cpufeat_mask(X86_FEATURE_VME) | \
+ cpufeat_mask(X86_FEATURE_DE) | cpufeat_mask(X86_FEATURE_PSE) | \
+ cpufeat_mask(X86_FEATURE_TSC) | cpufeat_mask(X86_FEATURE_MSR) | \
+ cpufeat_mask(X86_FEATURE_PAE) | cpufeat_mask(X86_FEATURE_MCE) | \
+ cpufeat_mask(X86_FEATURE_CX8) | cpufeat_mask(X86_FEATURE_APIC) | \
+ cpufeat_mask(X86_FEATURE_SEP) | cpufeat_mask(X86_FEATURE_MTRR) | \
+ cpufeat_mask(X86_FEATURE_PGE) | cpufeat_mask(X86_FEATURE_MCA) | \
+ cpufeat_mask(X86_FEATURE_CMOV) | cpufeat_mask(X86_FEATURE_PAT) | \
+ cpufeat_mask(X86_FEATURE_PSE36) | cpufeat_mask(X86_FEATURE_CLFLSH)| \
+ cpufeat_mask(X86_FEATURE_MMX) | cpufeat_mask(X86_FEATURE_FXSR) | \
+ cpufeat_mask(X86_FEATURE_XMM) | cpufeat_mask(X86_FEATURE_XMM2))
+#define AMD_EXTFEATURES_K8_REV_C_ECX 0
+#define AMD_EXTFEATURES_K8_REV_C_EDX ( \
+ cpufeat_mask(X86_FEATURE_FPU) | cpufeat_mask(X86_FEATURE_VME) | \
+ cpufeat_mask(X86_FEATURE_DE) | cpufeat_mask(X86_FEATURE_PSE) | \
+ cpufeat_mask(X86_FEATURE_TSC) | cpufeat_mask(X86_FEATURE_MSR) | \
+ cpufeat_mask(X86_FEATURE_PAE) | cpufeat_mask(X86_FEATURE_MCE) | \
+ cpufeat_mask(X86_FEATURE_CX8) | cpufeat_mask(X86_FEATURE_APIC) | \
+ cpufeat_mask(X86_FEATURE_SYSCALL) | cpufeat_mask(X86_FEATURE_MTRR) | \
+ cpufeat_mask(X86_FEATURE_PGE) | cpufeat_mask(X86_FEATURE_MCA) | \
+ cpufeat_mask(X86_FEATURE_CMOV) | cpufeat_mask(X86_FEATURE_PAT) | \
+ cpufeat_mask(X86_FEATURE_PSE36) | cpufeat_mask(X86_FEATURE_NX) | \
+ cpufeat_mask(X86_FEATURE_MMXEXT) | cpufeat_mask(X86_FEATURE_MMX) | \
+ cpufeat_mask(X86_FEATURE_FXSR) | cpufeat_mask(X86_FEATURE_LM) | \
+ cpufeat_mask(X86_FEATURE_3DNOWEXT) | cpufeat_mask(X86_FEATURE_3DNOW))
/* Family 0Fh, Revision D */
#define AMD_FEATURES_K8_REV_D_ECX AMD_FEATURES_K8_REV_C_ECX
#define AMD_FEATURES_K8_REV_D_EDX AMD_FEATURES_K8_REV_C_EDX
#define AMD_EXTFEATURES_K8_REV_D_ECX (AMD_EXTFEATURES_K8_REV_C_ECX |\
- __bit(X86_FEATURE_LAHF_LM))
+ cpufeat_mask(X86_FEATURE_LAHF_LM))
#define AMD_EXTFEATURES_K8_REV_D_EDX (AMD_EXTFEATURES_K8_REV_C_EDX |\
- __bit(X86_FEATURE_FFXSR))
+ cpufeat_mask(X86_FEATURE_FFXSR))
/* Family 0Fh, Revision E */
#define AMD_FEATURES_K8_REV_E_ECX (AMD_FEATURES_K8_REV_D_ECX | \
- __bit(X86_FEATURE_XMM3))
+ cpufeat_mask(X86_FEATURE_XMM3))
#define AMD_FEATURES_K8_REV_E_EDX (AMD_FEATURES_K8_REV_D_EDX | \
- __bit(X86_FEATURE_HT))
+ cpufeat_mask(X86_FEATURE_HT))
#define AMD_EXTFEATURES_K8_REV_E_ECX (AMD_EXTFEATURES_K8_REV_D_ECX |\
- __bit(X86_FEATURE_CMP_LEGACY))
+ cpufeat_mask(X86_FEATURE_CMP_LEGACY))
#define AMD_EXTFEATURES_K8_REV_E_EDX AMD_EXTFEATURES_K8_REV_D_EDX
/* Family 0Fh, Revision F */
#define AMD_FEATURES_K8_REV_F_ECX (AMD_FEATURES_K8_REV_E_ECX | \
- __bit(X86_FEATURE_CX16))
+ cpufeat_mask(X86_FEATURE_CX16))
#define AMD_FEATURES_K8_REV_F_EDX AMD_FEATURES_K8_REV_E_EDX
#define AMD_EXTFEATURES_K8_REV_F_ECX (AMD_EXTFEATURES_K8_REV_E_ECX |\
- __bit(X86_FEATURE_SVM) | __bit(X86_FEATURE_EXTAPIC) | \
- __bit(X86_FEATURE_CR8_LEGACY))
+ cpufeat_mask(X86_FEATURE_SVM) | cpufeat_mask(X86_FEATURE_EXTAPIC) | \
+ cpufeat_mask(X86_FEATURE_CR8_LEGACY))
#define AMD_EXTFEATURES_K8_REV_F_EDX (AMD_EXTFEATURES_K8_REV_E_EDX |\
- __bit(X86_FEATURE_RDTSCP))
+ cpufeat_mask(X86_FEATURE_RDTSCP))
/* Family 0Fh, Revision G */
#define AMD_FEATURES_K8_REV_G_ECX AMD_FEATURES_K8_REV_F_ECX
#define AMD_FEATURES_K8_REV_G_EDX AMD_FEATURES_K8_REV_F_EDX
#define AMD_EXTFEATURES_K8_REV_G_ECX (AMD_EXTFEATURES_K8_REV_F_ECX |\
- __bit(X86_FEATURE_3DNOWPREFETCH))
+ cpufeat_mask(X86_FEATURE_3DNOWPREFETCH))
#define AMD_EXTFEATURES_K8_REV_G_EDX AMD_EXTFEATURES_K8_REV_F_EDX
/* Family 10h, Revision B */
#define AMD_FEATURES_FAM10h_REV_B_ECX (AMD_FEATURES_K8_REV_F_ECX | \
- __bit(X86_FEATURE_POPCNT) | __bit(X86_FEATURE_MWAIT))
+ cpufeat_mask(X86_FEATURE_POPCNT) | cpufeat_mask(X86_FEATURE_MWAIT))
#define AMD_FEATURES_FAM10h_REV_B_EDX AMD_FEATURES_K8_REV_F_EDX
#define AMD_EXTFEATURES_FAM10h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_F_ECX |\
- __bit(X86_FEATURE_ABM) | __bit(X86_FEATURE_SSE4A) | \
- __bit(X86_FEATURE_MISALIGNSSE) | __bit(X86_FEATURE_OSVW) | \
- __bit(X86_FEATURE_IBS))
+ cpufeat_mask(X86_FEATURE_ABM) | cpufeat_mask(X86_FEATURE_SSE4A) | \
+ cpufeat_mask(X86_FEATURE_MISALIGNSSE) | cpufeat_mask(X86_FEATURE_OSVW) |\
+ cpufeat_mask(X86_FEATURE_IBS))
#define AMD_EXTFEATURES_FAM10h_REV_B_EDX (AMD_EXTFEATURES_K8_REV_F_EDX |\
- __bit(X86_FEATURE_PAGE1GB))
+ cpufeat_mask(X86_FEATURE_PAGE1GB))
/* Family 10h, Revision C */
#define AMD_FEATURES_FAM10h_REV_C_ECX AMD_FEATURES_FAM10h_REV_B_ECX
#define AMD_FEATURES_FAM10h_REV_C_EDX AMD_FEATURES_FAM10h_REV_B_EDX
#define AMD_EXTFEATURES_FAM10h_REV_C_ECX (AMD_EXTFEATURES_FAM10h_REV_B_ECX |\
- __bit(X86_FEATURE_SKINIT) | __bit(X86_FEATURE_WDT))
+ cpufeat_mask(X86_FEATURE_SKINIT) | cpufeat_mask(X86_FEATURE_WDT))
#define AMD_EXTFEATURES_FAM10h_REV_C_EDX AMD_EXTFEATURES_FAM10h_REV_B_EDX
/* Family 11h, Revision B */
#define AMD_FEATURES_FAM11h_REV_B_ECX AMD_FEATURES_K8_REV_G_ECX
#define AMD_FEATURES_FAM11h_REV_B_EDX AMD_FEATURES_K8_REV_G_EDX
#define AMD_EXTFEATURES_FAM11h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_G_ECX |\
- __bit(X86_FEATURE_SKINIT))
+ cpufeat_mask(X86_FEATURE_SKINIT))
#define AMD_EXTFEATURES_FAM11h_REV_B_EDX AMD_EXTFEATURES_K8_REV_G_EDX
/* AMD errata checking
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index 7c8c2c0..d750f03 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -137,7 +137,7 @@ void ret_from_intr(void);
#endif
#define CPUINFO_FEATURE_OFFSET(feature) \
- ((((feature) >> 3) & ~3) + CPUINFO_features)
+ (CPUINFO_features + (cpufeat_word(feature) * 4))
#else
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index e94b04d..e823954 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -9,6 +9,8 @@
#define __ASM_I386_CPUFEATURE_H
#endif
+#include <xen/const.h>
+
#define NCAPINTS 8 /* N 32-bit words worth of info */
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
@@ -160,12 +162,15 @@
#define X86_FEATURE_ADX (7*32+19) /* ADCX, ADOX instructions */
#define X86_FEATURE_SMAP (7*32+20) /* Supervisor Mode Access Prevention */
+#define cpufeat_word(idx) ((idx) / 32)
+#define cpufeat_bit(idx) ((idx) % 32)
+#define cpufeat_mask(idx) (_AC(1, U) << cpufeat_bit(idx))
+
#if !defined(__ASSEMBLY__) && !defined(X86_FEATURES_ONLY)
#include <xen/bitops.h>
#define cpu_has(c, bit) test_bit(bit, (c)->x86_capability)
#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
-#define cpufeat_mask(idx) (1u << ((idx) & 31))
#define CPUID_MWAIT_LEAF 5
#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
--
2.1.4
Thread overview: 5+ messages
2015-10-26 11:17 [PATCH 0/2] xen/x86: Improvements to x86_capability[] handling Andrew Cooper
2015-10-26 11:17 ` Andrew Cooper [this message]
2015-10-26 12:06 ` [PATCH 1/2] xen/x86: Helpers for cpu feature manipulation Jan Beulich
2015-10-26 12:42 ` Andrew Cooper
2015-10-26 11:17 ` [PATCH 2/2] xen/x86: Remove assumptions about the layout of x86_capabilities Andrew Cooper