* [PATCH] x86, fpu: rename XCR0 access macro
@ 2015-04-24 23:00 Dave Hansen
2015-04-25 8:24 ` Borislav Petkov
0 siblings, 1 reply; 2+ messages in thread
From: Dave Hansen @ 2015-04-24 23:00 UTC (permalink / raw)
To: linux-kernel; +Cc: Dave Hansen, dave.hansen, x86, fenghua.yu, bp, oleg, luto
From: Dave Hansen <dave.hansen@linux.intel.com>
The SDM says that the xgetbv instruction:
Reads the contents of the extended control register (XCR)
specified in the ECX register into registers EDX:EAX.
In other words, xgetbv(0) gets the thing that the SDM calls XCR0.
We have a macro for that "0" and we call it
XCR_XFEATURE_ENABLED_MASK. That's a bit unfortunate because it
is not a mask nor does the "0" have much to do with the features
we have enabled. It's an index if anything.
Rename the macro to what it means: "XCR0".
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: x86@kernel.org
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andy Lutomirski <luto@amacapital.net>
---
b/arch/x86/crypto/camellia_aesni_avx2_glue.c | 2 +-
b/arch/x86/crypto/camellia_aesni_avx_glue.c | 2 +-
b/arch/x86/crypto/cast5_avx_glue.c | 2 +-
b/arch/x86/crypto/cast6_avx_glue.c | 2 +-
b/arch/x86/crypto/serpent_avx2_glue.c | 2 +-
b/arch/x86/crypto/serpent_avx_glue.c | 2 +-
b/arch/x86/crypto/sha1_ssse3_glue.c | 2 +-
b/arch/x86/crypto/sha256_ssse3_glue.c | 2 +-
b/arch/x86/crypto/sha512_ssse3_glue.c | 2 +-
b/arch/x86/crypto/twofish_avx_glue.c | 2 +-
b/arch/x86/include/asm/xcr.h | 6 +++++-
b/arch/x86/kernel/xsave.c | 2 +-
b/arch/x86/kvm/x86.c | 16 ++++++++--------
b/arch/x86/power/cpu.c | 2 +-
14 files changed, 25 insertions(+), 21 deletions(-)
diff -puN arch/x86/kvm/x86.c~x86-fpu-rename-xcr0-macro arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.454089347 -0700
+++ b/arch/x86/kvm/x86.c 2015-04-24 15:59:42.193345311 -0700
@@ -634,7 +634,7 @@ static void kvm_load_guest_xcr0(struct k
if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
!vcpu->guest_xcr0_loaded) {
/* kvm_set_xcr() also depends on this */
- xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+ xsetbv(XCR0, vcpu->arch.xcr0);
vcpu->guest_xcr0_loaded = 1;
}
}
@@ -643,7 +643,7 @@ static void kvm_put_guest_xcr0(struct kv
{
if (vcpu->guest_xcr0_loaded) {
if (vcpu->arch.xcr0 != host_xcr0)
- xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+ xsetbv(XCR0, host_xcr0);
vcpu->guest_xcr0_loaded = 0;
}
}
@@ -654,8 +654,8 @@ static int __kvm_set_xcr(struct kvm_vcpu
u64 old_xcr0 = vcpu->arch.xcr0;
u64 valid_bits;
- /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
- if (index != XCR_XFEATURE_ENABLED_MASK)
+ /* Only support XCR0 now */
+ if (index != XCR0)
return 1;
if (!(xcr0 & XSTATE_FP))
return 1;
@@ -3295,7 +3295,7 @@ static void kvm_vcpu_ioctl_x86_get_xcrs(
guest_xcrs->nr_xcrs = 1;
guest_xcrs->flags = 0;
- guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
+ guest_xcrs->xcrs[0].xcr = XCR0;
guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
}
@@ -3312,8 +3312,8 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(s
for (i = 0; i < guest_xcrs->nr_xcrs; i++)
/* Only support XCR0 currently */
- if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
- r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
+ if (guest_xcrs->xcrs[i].xcr == XCR0) {
+ r = __kvm_set_xcr(vcpu, XCR0,
guest_xcrs->xcrs[i].value);
break;
}
@@ -5809,7 +5809,7 @@ int kvm_arch_init(void *opaque)
perf_register_guest_info_callbacks(&kvm_guest_cbs);
if (cpu_has_xsave)
- host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ host_xcr0 = xgetbv(XCR0);
kvm_lapic_init();
#ifdef CONFIG_X86_64
diff -puN arch/x86/include/asm/xcr.h~x86-fpu-rename-xcr0-macro arch/x86/include/asm/xcr.h
--- a/arch/x86/include/asm/xcr.h~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.456089437 -0700
+++ b/arch/x86/include/asm/xcr.h 2015-04-24 15:55:10.483090655 -0700
@@ -17,13 +17,17 @@
#ifndef _ASM_X86_XCR_H
#define _ASM_X86_XCR_H
-#define XCR_XFEATURE_ENABLED_MASK 0x00000000
+#define XCR0 0x00000000
#ifdef __KERNEL__
# ifndef __ASSEMBLY__
#include <linux/types.h>
+/*
+ * Reads the contents of the extended control register (XCR)
+ * specified in the ECX register into registers EDX:EAX.
+ */
static inline u64 xgetbv(u32 index)
{
u32 eax, edx;
diff -puN arch/x86/crypto/camellia_aesni_avx2_glue.c~x86-fpu-rename-xcr0-macro arch/x86/crypto/camellia_aesni_avx2_glue.c
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.458089527 -0700
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c 2015-04-24 15:55:10.484090700 -0700
@@ -568,7 +568,7 @@ static int __init camellia_aesni_init(vo
return -ENODEV;
}
- xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ xcr0 = xgetbv(XCR0);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
pr_info("AVX2 detected but unusable.\n");
return -ENODEV;
diff -puN arch/x86/crypto/camellia_aesni_avx_glue.c~x86-fpu-rename-xcr0-macro arch/x86/crypto/camellia_aesni_avx_glue.c
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.460089618 -0700
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c 2015-04-24 15:55:10.484090700 -0700
@@ -560,7 +560,7 @@ static int __init camellia_aesni_init(vo
return -ENODEV;
}
- xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ xcr0 = xgetbv(XCR0);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
pr_info("AVX detected but unusable.\n");
return -ENODEV;
diff -puN arch/x86/crypto/cast5_avx_glue.c~x86-fpu-rename-xcr0-macro arch/x86/crypto/cast5_avx_glue.c
--- a/arch/x86/crypto/cast5_avx_glue.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.462089708 -0700
+++ b/arch/x86/crypto/cast5_avx_glue.c 2015-04-24 15:55:10.485090745 -0700
@@ -475,7 +475,7 @@ static int __init cast5_init(void)
return -ENODEV;
}
- xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ xcr0 = xgetbv(XCR0);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
pr_info("AVX detected but unusable.\n");
return -ENODEV;
diff -puN arch/x86/crypto/cast6_avx_glue.c~x86-fpu-rename-xcr0-macro arch/x86/crypto/cast6_avx_glue.c
--- a/arch/x86/crypto/cast6_avx_glue.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.463089753 -0700
+++ b/arch/x86/crypto/cast6_avx_glue.c 2015-04-24 15:55:10.485090745 -0700
@@ -597,7 +597,7 @@ static int __init cast6_init(void)
return -ENODEV;
}
- xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ xcr0 = xgetbv(XCR0);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
pr_info("AVX detected but unusable.\n");
return -ENODEV;
diff -puN arch/x86/crypto/serpent_avx2_glue.c~x86-fpu-rename-xcr0-macro arch/x86/crypto/serpent_avx2_glue.c
--- a/arch/x86/crypto/serpent_avx2_glue.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.465089843 -0700
+++ b/arch/x86/crypto/serpent_avx2_glue.c 2015-04-24 15:55:10.485090745 -0700
@@ -544,7 +544,7 @@ static int __init init(void)
return -ENODEV;
}
- xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ xcr0 = xgetbv(XCR0);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
pr_info("AVX detected but unusable.\n");
return -ENODEV;
diff -puN arch/x86/crypto/serpent_avx_glue.c~x86-fpu-rename-xcr0-macro arch/x86/crypto/serpent_avx_glue.c
--- a/arch/x86/crypto/serpent_avx_glue.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.467089933 -0700
+++ b/arch/x86/crypto/serpent_avx_glue.c 2015-04-24 15:55:10.486090790 -0700
@@ -603,7 +603,7 @@ static int __init serpent_init(void)
return -ENODEV;
}
- xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ xcr0 = xgetbv(XCR0);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
printk(KERN_INFO "AVX detected but unusable.\n");
return -ENODEV;
diff -puN arch/x86/crypto/sha1_ssse3_glue.c~x86-fpu-rename-xcr0-macro arch/x86/crypto/sha1_ssse3_glue.c
--- a/arch/x86/crypto/sha1_ssse3_glue.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.469090024 -0700
+++ b/arch/x86/crypto/sha1_ssse3_glue.c 2015-04-24 15:55:10.486090790 -0700
@@ -128,7 +128,7 @@ static bool __init avx_usable(void)
if (!cpu_has_avx || !cpu_has_osxsave)
return false;
- xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ xcr0 = xgetbv(XCR0);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
pr_info("AVX detected but unusable.\n");
diff -puN arch/x86/crypto/sha256_ssse3_glue.c~x86-fpu-rename-xcr0-macro arch/x86/crypto/sha256_ssse3_glue.c
--- a/arch/x86/crypto/sha256_ssse3_glue.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.471090114 -0700
+++ b/arch/x86/crypto/sha256_ssse3_glue.c 2015-04-24 15:55:10.486090790 -0700
@@ -137,7 +137,7 @@ static bool __init avx_usable(void)
if (!cpu_has_avx || !cpu_has_osxsave)
return false;
- xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ xcr0 = xgetbv(XCR0);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
pr_info("AVX detected but unusable.\n");
diff -puN arch/x86/crypto/sha512_ssse3_glue.c~x86-fpu-rename-xcr0-macro arch/x86/crypto/sha512_ssse3_glue.c
--- a/arch/x86/crypto/sha512_ssse3_glue.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.472090159 -0700
+++ b/arch/x86/crypto/sha512_ssse3_glue.c 2015-04-24 15:55:10.487090836 -0700
@@ -136,7 +136,7 @@ static bool __init avx_usable(void)
if (!cpu_has_avx || !cpu_has_osxsave)
return false;
- xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ xcr0 = xgetbv(XCR0);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
pr_info("AVX detected but unusable.\n");
diff -puN arch/x86/crypto/twofish_avx_glue.c~x86-fpu-rename-xcr0-macro arch/x86/crypto/twofish_avx_glue.c
--- a/arch/x86/crypto/twofish_avx_glue.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.474090249 -0700
+++ b/arch/x86/crypto/twofish_avx_glue.c 2015-04-24 15:55:10.487090836 -0700
@@ -565,7 +565,7 @@ static int __init twofish_init(void)
return -ENODEV;
}
- xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ xcr0 = xgetbv(XCR0);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
printk(KERN_INFO "AVX detected but unusable.\n");
return -ENODEV;
diff -puN arch/x86/kernel/xsave.c~x86-fpu-rename-xcr0-macro arch/x86/kernel/xsave.c
--- a/arch/x86/kernel/xsave.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.476090339 -0700
+++ b/arch/x86/kernel/xsave.c 2015-04-24 15:55:10.487090836 -0700
@@ -456,7 +456,7 @@ static void prepare_fx_sw_frame(void)
static inline void xstate_enable(void)
{
cr4_set_bits(X86_CR4_OSXSAVE);
- xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
+ xsetbv(XCR0, pcntxt_mask);
}
/*
diff -puN arch/x86/power/cpu.c~x86-fpu-rename-xcr0-macro arch/x86/power/cpu.c
--- a/arch/x86/power/cpu.c~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.478090430 -0700
+++ b/arch/x86/power/cpu.c 2015-04-24 15:55:10.488090881 -0700
@@ -225,7 +225,7 @@ static void notrace __restore_processor_
* restore XCR0 for xsave capable cpu's.
*/
if (cpu_has_xsave)
- xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
+ xsetbv(XCR0, pcntxt_mask);
fix_processor_context();
_
^ permalink raw reply [flat|nested] 2+ messages in thread
* Re: [PATCH] x86, fpu: rename XCR0 access macro
2015-04-24 23:00 [PATCH] x86, fpu: rename XCR0 access macro Dave Hansen
@ 2015-04-25 8:24 ` Borislav Petkov
0 siblings, 0 replies; 2+ messages in thread
From: Borislav Petkov @ 2015-04-25 8:24 UTC (permalink / raw)
To: Dave Hansen; +Cc: linux-kernel, dave.hansen, x86, fenghua.yu, oleg, luto
On Fri, Apr 24, 2015 at 04:00:35PM -0700, Dave Hansen wrote:
>
> From: Dave Hansen <dave.hansen@linux.intel.com>
>
> The SDM says that the xgetbv instruction:
>
> Reads the contents of the extended control register (XCR)
> specified in the ECX register into registers EDX:EAX.
>
> In other words, xgetbv(0) gets the thing that the SDM calls XCR0.
> We have a macro for that "0" and we call it
> XCR_XFEATURE_ENABLED_MASK. That's a bit unfortunate because it
> is not a mask nor does the "0" have much to do with the features
> we have enabled. It's an index if anything.
>
> Rename the macro to what it means: "XCR0".
>
> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
> Cc: x86@kernel.org
> Cc: Fenghua Yu <fenghua.yu@intel.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: Oleg Nesterov <oleg@redhat.com>
> Cc: Andy Lutomirski <luto@amacapital.net>
>
> ---
>
> b/arch/x86/crypto/camellia_aesni_avx2_glue.c | 2 +-
> b/arch/x86/crypto/camellia_aesni_avx_glue.c | 2 +-
> b/arch/x86/crypto/cast5_avx_glue.c | 2 +-
> b/arch/x86/crypto/cast6_avx_glue.c | 2 +-
> b/arch/x86/crypto/serpent_avx2_glue.c | 2 +-
> b/arch/x86/crypto/serpent_avx_glue.c | 2 +-
> b/arch/x86/crypto/sha1_ssse3_glue.c | 2 +-
> b/arch/x86/crypto/sha256_ssse3_glue.c | 2 +-
> b/arch/x86/crypto/sha512_ssse3_glue.c | 2 +-
> b/arch/x86/crypto/twofish_avx_glue.c | 2 +-
> b/arch/x86/include/asm/xcr.h | 6 +++++-
> b/arch/x86/kernel/xsave.c | 2 +-
> b/arch/x86/kvm/x86.c | 16 ++++++++--------
> b/arch/x86/power/cpu.c | 2 +-
> 14 files changed, 25 insertions(+), 21 deletions(-)
Applied, but...
> diff -puN arch/x86/include/asm/xcr.h~x86-fpu-rename-xcr0-macro arch/x86/include/asm/xcr.h
> --- a/arch/x86/include/asm/xcr.h~x86-fpu-rename-xcr0-macro 2015-04-24 15:55:10.456089437 -0700
> +++ b/arch/x86/include/asm/xcr.h 2015-04-24 15:55:10.483090655 -0700
> @@ -17,13 +17,17 @@
> #ifndef _ASM_X86_XCR_H
> #define _ASM_X86_XCR_H
>
> -#define XCR_XFEATURE_ENABLED_MASK 0x00000000
> +#define XCR0 0x00000000
... changed that to a simple "0".
Btw, I can imagine this becoming a macro, i.e. XCR(num) later. I see
there already is the %ecx==1 call option of XGETBV which gives you the
optimization used by XSAVEOPT/XSAVES so it is all a matter of time. :)
Thanks.
--
Regards/Gruss,
Boris.
ECO tip #101: Trim your mails when you reply.
--
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2015-04-25 8:24 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-04-24 23:00 [PATCH] x86, fpu: rename XCR0 access macro Dave Hansen
2015-04-25 8:24 ` Borislav Petkov
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox