xen-devel.lists.xenproject.org archive mirror
* [PATCH] x86: enable VIA CPU support
From: Jan Beulich @ 2012-09-21 11:40 UTC
  To: xen-devel

Newer VIA CPUs have both 64-bit and VMX support. Enable them to be
recognized for these purposes, at the same time stripping any
32-bit-CPU-only bits from the respective CPU support file.

This particularly implies untying the VMX == Intel assumption in a few
places.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

---
Note that my testing of this functionality wasn't as wide as I would
have hoped, since the box I was provided with only survived the first
few days - meanwhile it doesn't stay up long enough even to build the
hypervisor and tools. Therefore, further fixes to fully support these
CPUs may be needed once the VIA folks themselves get to test this code.

--- a/xen/arch/x86/acpi/suspend.c
+++ b/xen/arch/x86/acpi/suspend.c
@@ -32,7 +32,8 @@ void save_rest_processor_state(void)
     rdmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
     rdmsrl(MSR_CSTAR, saved_cstar);
     rdmsrl(MSR_LSTAR, saved_lstar);
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
     {
         rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
         rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
@@ -59,7 +60,8 @@ void restore_rest_processor_state(void)
     wrmsrl(MSR_GS_BASE, saved_gs_base);
     wrmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
 
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
     {
         /* Recover sysenter MSRs */
         wrmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
--- a/xen/arch/x86/cpu/Makefile
+++ b/xen/arch/x86/cpu/Makefile
@@ -2,10 +2,8 @@ subdir-y += mcheck
 subdir-y += mtrr
 
 obj-y += amd.o
+obj-y += centaur.o
 obj-y += common.o
 obj-y += intel.o
 obj-y += intel_cacheinfo.o
 obj-y += mwait-idle.o
-
-# Keeping around for VIA support (JBeulich)
-# obj-$(x86_32) += centaur.o
--- a/xen/arch/x86/cpu/centaur.c
+++ b/xen/arch/x86/cpu/centaur.c
@@ -45,51 +45,25 @@ static void __init init_c3(struct cpuinf
 		c->x86_capability[5] = cpuid_edx(0xC0000001);
 	}
 
-	/* Cyrix III family needs CX8 & PGE explicity enabled. */
-	if (c->x86_model >=6 && c->x86_model <= 9) {
-		rdmsrl(MSR_VIA_FCR, msr_content);
-		wrmsrl(MSR_VIA_FCR, msr_content | (1ULL << 1 | 1ULL << 7));
-		set_bit(X86_FEATURE_CX8, c->x86_capability);
+	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
+		c->x86_cache_alignment = c->x86_clflush_size * 2;
+		set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
 	}
 
-	/* Before Nehemiah, the C3's had 3dNOW! */
-	if (c->x86_model >=6 && c->x86_model <9)
-		set_bit(X86_FEATURE_3DNOW, c->x86_capability);
-
 	get_model_name(c);
 	display_cacheinfo(c);
 }
 
 static void __init init_centaur(struct cpuinfo_x86 *c)
 {
-	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, c->x86_capability);
-
 	if (c->x86 == 6)
 		init_c3(c);
 }
 
-static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
-{
-	/* VIA C3 CPUs (670-68F) need further shifting. */
-	if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
-		size >>= 8;
-
-	/* VIA also screwed up Nehemiah stepping 1, and made
-	   it return '65KB' instead of '64KB'
-	   - Note, it seems this may only be in engineering samples. */
-	if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
-		size -=1;
-
-	return size;
-}
-
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
 	.c_init		= init_centaur,
-	.c_size_cache	= centaur_size_cache,
 };
 
 int __init centaur_init_cpu(void)
@@ -97,5 +71,3 @@ int __init centaur_init_cpu(void)
 	cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
 	return 0;
 }
-
-//early_arch_initcall(centaur_init_cpu);
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -567,6 +567,7 @@ void __init early_cpu_init(void)
 {
 	intel_cpu_init();
 	amd_init_cpu();
+	centaur_init_cpu();
 	early_cpu_detect();
 }
 /*
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -114,6 +114,7 @@ static int __init hvm_enable(void)
     switch ( boot_cpu_data.x86_vendor )
     {
     case X86_VENDOR_INTEL:
+    case X86_VENDOR_CENTAUR:
         fns = start_vmx();
         break;
     case X86_VENDOR_AMD:
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -151,13 +151,15 @@ nestedhvm_is_n2(struct vcpu *v)
 static int __init
 nestedhvm_setup(void)
 {
-    /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
-    unsigned int nr = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ? 2 : 3;
-    unsigned int i, order = get_order_from_pages(nr);
+    unsigned int i, nr, order;
 
     if ( !hvm_funcs.name )
         return 0;
 
+    /* Same format and size as hvm_io_bitmap (VMX needs only 2 pages). */
+    nr = !strcmp(hvm_funcs.name, "VMX") ? 2 : 3;
+    order = get_order_from_pages(nr);
+
     /* shadow_io_bitmaps can't be declared static because
      *   they must fulfill hw requirements (page aligned section)
      *   and doing so triggers the ASSERT(va >= XEN_VIRT_START)
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -156,8 +156,7 @@ static void enable_hypercall_page(struct
     *(u32 *)(p + 1) = 0x80000000;
     *(u8  *)(p + 5) = 0x0f; /* vmcall/vmmcall */
     *(u8  *)(p + 6) = 0x01;
-    *(u8  *)(p + 7) = ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-                       ? 0xc1 : 0xd9);
+    *(u8  *)(p + 7) = (!strcmp(hvm_funcs.name, "VMX") ? 0xc1 : 0xd9);
     *(u8  *)(p + 8) = 0xc3; /* ret */
     memset(p + 9, 0xcc, PAGE_SIZE - 9); /* int3, int3, ... */
 
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -608,7 +608,7 @@ int mem_event_domctl(struct domain *d, x
                 break;
 
             /* Currently only EPT is supported */
-            if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+            if ( strcmp(hvm_funcs.name, "VMX") )
                 break;
 
             rc = mem_event_enable(d, mec, med, _VPF_mem_access, 
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -83,7 +83,7 @@ static void p2m_initialise(struct domain
 
     p2m->cr3 = CR3_EADDR;
 
-    if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
+    if ( hap_enabled(d) && !strcmp(hvm_funcs.name, "VMX") )
         ept_p2m_init(p2m);
     else
         p2m_pt_init(p2m);
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -399,7 +399,8 @@ void __devinit subarch_percpu_traps_init
     wrmsrl(MSR_LSTAR, (unsigned long)stack);
     stack += write_stack_trampoline(stack, stack_bottom, FLAT_KERNEL_CS64);
 
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
     {
         /* SYSENTER entry. */
         wrmsrl(MSR_IA32_SYSENTER_ESP, (unsigned long)stack_bottom);




* Re: [PATCH] x86: enable VIA CPU support
From: Keir Fraser @ 2012-09-21 12:55 UTC
  To: Jan Beulich, xen-devel

On 21/09/2012 12:40, "Jan Beulich" <JBeulich@suse.com> wrote:

> Newer VIA CPUs have both 64-bit and VMX support. Enable them to be
> recognized for these purposes, at the same time stripping any
> 32-bit-CPU-only bits from the respective CPU support file.
> 
> This particularly implies untying the VMX == Intel assumption in a few
> places.

Why can't we use 'cpu_has_vmx' instead of your strcmp construct?

It strikes me that if it's safe to use in the one place it is already
used (HVM_CR4_GUEST_RESERVED_BITS), then it should be safe for your uses
too. If it isn't, then it's probably unsafe in its current use as well,
and we should change the definition of cpu_has_vmx in a preparatory
patch.
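
For reference - assuming I'm remembering xen/include/asm-x86/cpufeature.h
correctly - cpu_has_vmx is nothing more than a feature-bit test on the
boot CPU:

    #define cpu_has_vmx     boot_cpu_has(X86_FEATURE_VMXE)

That is the same test_bit(X86_FEATURE_VMXE, ...) check start_vmx()
currently open-codes, just usable anywhere once early feature detection
has run.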

 -- Keir


* Re: [PATCH] x86: enable VIA CPU support
From: Jan Beulich @ 2012-09-21 13:22 UTC
  To: Keir Fraser; +Cc: xen-devel

>>> On 21.09.12 at 14:55, Keir Fraser <keir.xen@gmail.com> wrote:
> On 21/09/2012 12:40, "Jan Beulich" <JBeulich@suse.com> wrote:
> 
>> Newer VIA CPUs have both 64-bit and VMX support. Enable them to be
>> recognized for these purposes, at the same time stripping any
>> 32-bit-CPU-only bits from the respective CPU support file.
>> 
>> This particularly implies untying the VMX == Intel assumption in a few
>> places.
> 
> Why can't we use 'cpu_has_vmx' instead of your strcmp construct?

Honestly, I didn't even notice we had this (given that it's there, I
would have expected it to be used instead of the vendor checks).

> It strikes me that if it's safe to use in the one place it is already
> used (HVM_CR4_GUEST_RESERVED_BITS), then it should be safe for your uses
> too. If it isn't, then it's probably unsafe in its current use as well,
> and we should change the definition of cpu_has_vmx in a preparatory patch.

Indeed - I'll adjust the patch accordingly.
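
Concretely, I'd expect the vendor switch in hvm_enable() to collapse to
something like this (untested sketch):

    if ( cpu_has_vmx )
        fns = start_vmx();
    else if ( cpu_has_svm )
        fns = start_svm();

which would also make the open-coded feature checks at the top of
start_vmx() and start_svm() redundant.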

Jan


* [PATCH, v2] x86: enable VIA CPU support
From: Jan Beulich @ 2012-09-21 14:24 UTC
  To: Keir Fraser; +Cc: xen-devel

Newer VIA CPUs have both 64-bit and VMX support. Enable them to be
recognized for these purposes, at the same time stripping any
32-bit-CPU-only bits from the respective CPU support file.

This particularly implies untying the VMX == Intel assumption in a few
places.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

---
v2: Use cpu_has_vmx instead of comparing hvm_funcs.name, as suggested
    by Keir. Extend this by also using cpu_has_vmx and cpu_has_svm in
    hvm_enable(), making the respective open-coded checks in
    start_{svm,vmx}() unnecessary.

Note that my testing of this functionality wasn't as wide as I would
have hoped, since the box I was provided with only survived the first
few days - meanwhile it doesn't stay up long enough even to build the
hypervisor and tools. Therefore, further fixes to fully support these
CPUs may be needed once the VIA folks themselves get to test this code.

--- a/xen/arch/x86/acpi/suspend.c
+++ b/xen/arch/x86/acpi/suspend.c
@@ -32,7 +32,8 @@ void save_rest_processor_state(void)
     rdmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
     rdmsrl(MSR_CSTAR, saved_cstar);
     rdmsrl(MSR_LSTAR, saved_lstar);
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
     {
         rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
         rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
@@ -59,7 +60,8 @@ void restore_rest_processor_state(void)
     wrmsrl(MSR_GS_BASE, saved_gs_base);
     wrmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
 
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
     {
         /* Recover sysenter MSRs */
         wrmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
--- a/xen/arch/x86/cpu/Makefile
+++ b/xen/arch/x86/cpu/Makefile
@@ -2,10 +2,8 @@ subdir-y += mcheck
 subdir-y += mtrr
 
 obj-y += amd.o
+obj-y += centaur.o
 obj-y += common.o
 obj-y += intel.o
 obj-y += intel_cacheinfo.o
 obj-y += mwait-idle.o
-
-# Keeping around for VIA support (JBeulich)
-# obj-$(x86_32) += centaur.o
--- a/xen/arch/x86/cpu/centaur.c
+++ b/xen/arch/x86/cpu/centaur.c
@@ -45,51 +45,25 @@ static void __init init_c3(struct cpuinf
 		c->x86_capability[5] = cpuid_edx(0xC0000001);
 	}
 
-	/* Cyrix III family needs CX8 & PGE explicity enabled. */
-	if (c->x86_model >=6 && c->x86_model <= 9) {
-		rdmsrl(MSR_VIA_FCR, msr_content);
-		wrmsrl(MSR_VIA_FCR, msr_content | (1ULL << 1 | 1ULL << 7));
-		set_bit(X86_FEATURE_CX8, c->x86_capability);
+	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
+		c->x86_cache_alignment = c->x86_clflush_size * 2;
+		set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
 	}
 
-	/* Before Nehemiah, the C3's had 3dNOW! */
-	if (c->x86_model >=6 && c->x86_model <9)
-		set_bit(X86_FEATURE_3DNOW, c->x86_capability);
-
 	get_model_name(c);
 	display_cacheinfo(c);
 }
 
 static void __init init_centaur(struct cpuinfo_x86 *c)
 {
-	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, c->x86_capability);
-
 	if (c->x86 == 6)
 		init_c3(c);
 }
 
-static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
-{
-	/* VIA C3 CPUs (670-68F) need further shifting. */
-	if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
-		size >>= 8;
-
-	/* VIA also screwed up Nehemiah stepping 1, and made
-	   it return '65KB' instead of '64KB'
-	   - Note, it seems this may only be in engineering samples. */
-	if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
-		size -=1;
-
-	return size;
-}
-
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
 	.c_init		= init_centaur,
-	.c_size_cache	= centaur_size_cache,
 };
 
 int __init centaur_init_cpu(void)
@@ -97,5 +71,3 @@ int __init centaur_init_cpu(void)
 	cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
 	return 0;
 }
-
-//early_arch_initcall(centaur_init_cpu);
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -522,6 +522,7 @@ void __init early_cpu_init(void)
 {
 	intel_cpu_init();
 	amd_init_cpu();
+	centaur_init_cpu();
 	early_cpu_detect();
 }
 /*
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -111,17 +111,10 @@ static int __init hvm_enable(void)
 {
     struct hvm_function_table *fns = NULL;
 
-    switch ( boot_cpu_data.x86_vendor )
-    {
-    case X86_VENDOR_INTEL:
+    if ( cpu_has_vmx )
         fns = start_vmx();
-        break;
-    case X86_VENDOR_AMD:
+    else if ( cpu_has_svm )
         fns = start_svm();
-        break;
-    default:
-        break;
-    }
 
     if ( fns == NULL )
         return 0;
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -152,7 +152,7 @@ static int __init
 nestedhvm_setup(void)
 {
     /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
-    unsigned int nr = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ? 2 : 3;
+    unsigned nr = cpu_has_vmx ? 2 : 3;
     unsigned int i, order = get_order_from_pages(nr);
 
     if ( !hvm_funcs.name )
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1240,9 +1240,6 @@ struct hvm_function_table * __init start
 {
     bool_t printed = 0;
 
-    if ( !test_bit(X86_FEATURE_SVM, &boot_cpu_data.x86_capability) )
-        return NULL;
-
     svm_host_osvw_reset();
 
     if ( svm_cpu_up() )
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -156,8 +156,7 @@ static void enable_hypercall_page(struct
     *(u32 *)(p + 1) = 0x80000000;
     *(u8  *)(p + 5) = 0x0f; /* vmcall/vmmcall */
     *(u8  *)(p + 6) = 0x01;
-    *(u8  *)(p + 7) = ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-                       ? 0xc1 : 0xd9);
+    *(u8  *)(p + 7) = (cpu_has_vmx ? 0xc1 : 0xd9);
     *(u8  *)(p + 8) = 0xc3; /* ret */
     memset(p + 9, 0xcc, PAGE_SIZE - 9); /* int3, int3, ... */
 
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1516,9 +1516,6 @@ static struct hvm_function_table __read_
 
 struct hvm_function_table * __init start_vmx(void)
 {
-    if ( !test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability) )
-        return NULL;
-
     set_in_cr4(X86_CR4_VMXE);
 
     if ( vmx_cpu_up() )
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -608,7 +608,7 @@ int mem_event_domctl(struct domain *d, x
                 break;
 
             /* Currently only EPT is supported */
-            if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+            if ( !cpu_has_vmx )
                 break;
 
             rc = mem_event_enable(d, mec, med, _VPF_mem_access, 
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -83,7 +83,7 @@ static void p2m_initialise(struct domain
 
     p2m->cr3 = CR3_EADDR;
 
-    if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
+    if ( hap_enabled(d) && cpu_has_vmx )
         ept_p2m_init(p2m);
     else
         p2m_pt_init(p2m);
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -399,7 +399,8 @@ void __devinit subarch_percpu_traps_init
     wrmsrl(MSR_LSTAR, (unsigned long)stack);
     stack += write_stack_trampoline(stack, stack_bottom, FLAT_KERNEL_CS64);
 
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
     {
         /* SYSENTER entry. */
         wrmsrl(MSR_IA32_SYSENTER_ESP, (unsigned long)stack_bottom);




* Re: [PATCH, v2] x86: enable VIA CPU support
From: Keir Fraser @ 2012-09-21 14:44 UTC
  To: Jan Beulich; +Cc: xen-devel

On 21/09/2012 15:24, "Jan Beulich" <JBeulich@suse.com> wrote:

> Newer VIA CPUs have both 64-bit and VMX support. Enable them to be
> recognized for these purposes, at the same time stripping any
> 32-bit-CPU-only bits from the respective CPU support file.
> 
> This particularly implies untying the VMX == Intel assumption in a few
> places.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Keir Fraser <keir@xen.org>

