* [PATCH] x86: fix pv cpuid masking
@ 2010-06-15 11:49 Jan Beulich
From: Jan Beulich @ 2010-06-15 11:49 UTC
To: xen-devel
Invert the initial values of the variables that the command line options
are parsed into, so that completely clearing out one or more of the four
bit fields is possible.
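To illustrate why the inversion is needed (a standalone sketch for this
mail, not code from the patch): with the old zero defaults, a mask of 0
was indistinguishable from "option not specified", so an all-zero field
could never take effect; with all-ones defaults, the test
~(ecx & edx & ext_ecx & ext_edx) fires whenever any field was set at all,
including to zero.

    /* Illustration only -- simplified, not the actual Xen option handling. */
    #include <stdio.h>

    static unsigned int old_ecx;        /* old scheme: defaults to 0        */
    static unsigned int new_ecx = ~0u;  /* new scheme: defaults to all-ones */

    int main(void)
    {
        /* Admin passes cpuid_mask_ecx=0 to hide every CPUID.1:ECX feature. */
        old_ecx = 0;
        new_ecx = 0;

        if (old_ecx)       /* old check: 0 looks like "no mask requested"  */
            puts("old: mask would be applied");
        else
            puts("old: all-zero mask silently ignored");

        if (~new_ecx)      /* new check: any value other than ~0u counts   */
            puts("new: mask applied, even an all-zero one");

        return 0;
    }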
Further, consolidate the command line parameter specifications into
a single place.
Finally, as per "Intel Virtualization Technology FlexMigration
Application Note" (http://www.intel.com/Assets/PDF/manual/323850.pdf),
also handle family 6 model 0x1f.
What remains open is the question whether pv_cpuid() shouldn't also
consume these masks.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- 2010-06-15.orig/xen/arch/x86/cpu/amd.c 2010-06-15 09:34:25.000000000 +0200
+++ 2010-06-15/xen/arch/x86/cpu/amd.c 2010-06-15 09:44:02.000000000 +0200
@@ -31,14 +31,6 @@
static char opt_famrev[14];
string_param("cpuid_mask_cpu", opt_famrev);
-/* Finer-grained CPUID feature control. */
-static unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
-integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
-integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
-static unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
-integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
-integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
-
static inline void wrmsr_amd(unsigned int index, unsigned int lo,
unsigned int hi)
{
@@ -59,7 +51,7 @@ static inline void wrmsr_amd(unsigned in
*
* The processor revision string parameter has precedence.
*/
-static void __devinit set_cpuidmask(struct cpuinfo_x86 *c)
+static void __devinit set_cpuidmask(const struct cpuinfo_x86 *c)
{
static unsigned int feat_ecx, feat_edx;
static unsigned int extfeat_ecx, extfeat_edx;
@@ -74,12 +66,12 @@ static void __devinit set_cpuidmask(stru
ASSERT((status == not_parsed) && (smp_processor_id() == 0));
status = no_mask;
- if (opt_cpuid_mask_ecx | opt_cpuid_mask_edx |
- opt_cpuid_mask_ext_ecx | opt_cpuid_mask_ext_edx) {
- feat_ecx = opt_cpuid_mask_ecx ? : ~0U;
- feat_edx = opt_cpuid_mask_edx ? : ~0U;
- extfeat_ecx = opt_cpuid_mask_ext_ecx ? : ~0U;
- extfeat_edx = opt_cpuid_mask_ext_edx ? : ~0U;
+ if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
+ opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
+ feat_ecx = opt_cpuid_mask_ecx;
+ feat_edx = opt_cpuid_mask_edx;
+ extfeat_ecx = opt_cpuid_mask_ext_ecx;
+ extfeat_edx = opt_cpuid_mask_ext_edx;
} else if (*opt_famrev == '\0') {
return;
} else if (!strcmp(opt_famrev, "fam_0f_rev_c")) {
--- 2010-06-15.orig/xen/arch/x86/cpu/common.c 2010-06-15 09:34:25.000000000 +0200
+++ 2010-06-15/xen/arch/x86/cpu/common.c 2010-06-15 09:35:39.000000000 +0200
@@ -22,6 +22,15 @@ static int cachesize_override __cpuinitd
static int disable_x86_fxsr __cpuinitdata;
static int disable_x86_serial_nr __cpuinitdata;
+unsigned int __devinitdata opt_cpuid_mask_ecx = ~0u;
+integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
+unsigned int __devinitdata opt_cpuid_mask_edx = ~0u;
+integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
+unsigned int __devinitdata opt_cpuid_mask_ext_ecx = ~0u;
+integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
+unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
+integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
+
struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
/*
--- 2010-06-15.orig/xen/arch/x86/cpu/cpu.h 2010-06-15 09:34:25.000000000 +0200
+++ 2010-06-15/xen/arch/x86/cpu/cpu.h 2010-06-15 09:35:39.000000000 +0200
@@ -21,6 +21,9 @@ struct cpu_dev {
extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
+extern unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
+extern unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
+
extern int get_model_name(struct cpuinfo_x86 *c);
extern void display_cacheinfo(struct cpuinfo_x86 *c);
--- 2010-06-15.orig/xen/arch/x86/cpu/intel.c 2010-06-11 11:41:35.000000000 +0200
+++ 2010-06-15/xen/arch/x86/cpu/intel.c 2010-06-15 09:50:44.000000000 +0200
@@ -20,19 +20,6 @@
extern int trap_init_f00f_bug(void);
-/*
- * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
- * For example, E8400[Intel Core 2 Duo Processor series] ecx = 0x0008E3FD,
- * edx = 0xBFEBFBFF when executing CPUID.EAX = 1 normally. If you want to
- * 'rev down' to E8400, you can set these values in these Xen boot parameters.
- */
-static unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
-integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
-integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
-static unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
-integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
-integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
-
static int use_xsave = 1;
boolean_param("xsave", use_xsave);
@@ -43,46 +30,57 @@ boolean_param("xsave", use_xsave);
struct movsl_mask movsl_mask __read_mostly;
#endif
-static void __devinit set_cpuidmask(struct cpuinfo_x86 *c)
+/*
+ * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
+ * For example, E8400[Intel Core 2 Duo Processor series] ecx = 0x0008E3FD,
+ * edx = 0xBFEBFBFF when executing CPUID.EAX = 1 normally. If you want to
+ * 'rev down' to E8400, you can set these values in these Xen boot parameters.
+ */
+static void __devinit set_cpuidmask(const struct cpuinfo_x86 *c)
{
- unsigned int model = c->x86_model;
-
- if (!(opt_cpuid_mask_ecx | opt_cpuid_mask_edx |
- opt_cpuid_mask_ext_ecx | opt_cpuid_mask_ext_edx))
- return;
+ const char *extra = "";
- if (c->x86 != 0x6) /* Only family 6 supports this feature */
+ if (!~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
+ opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx))
return;
- if ((model == 0x1d) || ((model == 0x17) && (c->x86_mask >= 4))) {
- wrmsr(MSR_IA32_CPUID_FEATURE_MASK1,
- opt_cpuid_mask_ecx ? : ~0u,
- opt_cpuid_mask_edx ? : ~0u);
- }
+ /* Only family 6 supports this feature */
+ switch ((c->x86 == 6) * c->x86_model) {
+ case 0x17:
+ if ((c->x86_mask & 0x0f) < 4)
+ break;
+ /* fall through */
+ case 0x1d:
+ wrmsr(MSR_INTEL_CPUID_FEATURE_MASK,
+ opt_cpuid_mask_ecx,
+ opt_cpuid_mask_edx);
+ if (!~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx))
+ return;
+ extra = "extended ";
+ break;
/*
* CPU supports this feature if the processor signature meets the following:
* (CPUID.(EAX=01h):EAX) > 000106A2h, or
* (CPUID.(EAX=01h):EAX) == 000106Exh, 0002065xh, 000206Cxh, 000206Exh, or 000206Fxh
*
*/
- else if (((model == 0x1a) && (c->x86_mask > 2))
- || model == 0x1e
- || model == 0x25
- || model == 0x2c
- || model == 0x2e
- || model == 0x2f) {
- wrmsr(MSR_IA32_CPUID1_FEATURE_MASK,
- opt_cpuid_mask_ecx ? : ~0u,
- opt_cpuid_mask_edx ? : ~0u);
- wrmsr(MSR_IA32_CPUID80000001_FEATURE_MASK,
- opt_cpuid_mask_ext_ecx ? : ~0u,
- opt_cpuid_mask_ext_edx ? : ~0u);
- }
- else {
- printk(XENLOG_ERR "Cannot set CPU feature mask on CPU#%d\n",
- smp_processor_id());
+ case 0x1a:
+ if ((c->x86_mask & 0x0f) <= 2)
+ break;
+ /* fall through */
+ case 0x1e: case 0x1f:
+ case 0x25: case 0x2c: case 0x2e: case 0x2f:
+ wrmsr(MSR_INTEL_CPUID1_FEATURE_MASK,
+ opt_cpuid_mask_ecx,
+ opt_cpuid_mask_edx);
+ wrmsr(MSR_INTEL_CPUID80000001_FEATURE_MASK,
+ opt_cpuid_mask_ext_ecx,
+ opt_cpuid_mask_ext_edx);
return;
}
+
+ printk(XENLOG_ERR "Cannot set CPU feature mask on CPU#%d\n",
+ smp_processor_id());
}
void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
--- 2010-06-15.orig/xen/include/asm-x86/msr-index.h 2010-06-15 09:34:25.000000000 +0200
+++ 2010-06-15/xen/include/asm-x86/msr-index.h 2010-06-15 09:35:39.000000000 +0200
@@ -156,10 +156,10 @@
#define MSR_P6_EVNTSEL0 0x00000186
#define MSR_P6_EVNTSEL1 0x00000187
-/* MSR for cpuid feature mask */
-#define MSR_IA32_CPUID_FEATURE_MASK1 0x00000478
-#define MSR_IA32_CPUID1_FEATURE_MASK 0x00000130
-#define MSR_IA32_CPUID80000001_FEATURE_MASK 0x00000131
+/* MSRs for Intel cpuid feature mask */
+#define MSR_INTEL_CPUID_FEATURE_MASK 0x00000478
+#define MSR_INTEL_CPUID1_FEATURE_MASK 0x00000130
+#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
/* MSRs & bits used for VMX enabling */
#define MSR_IA32_VMX_BASIC 0x480
* Re: [PATCH] x86: fix pv cpuid masking
From: Jan Beulich @ 2010-06-16 13:21 UTC
To: Keir Fraser; +Cc: xen-devel
>>> On 15.06.10 at 13:49, "Jan Beulich" <JBeulich@novell.com> wrote:
> Invert initial values of the variables parsed into from the command
> line, so that completely clearing out one or more of the four bit
> fields is possible.
>
> Further, consolidate the command line parameter specifications into
> a single place.
>
> Finally, as per "Intel Virtualization Technology FlexMigration
> Application Note" (http://www.intel.com/Assets/PDF/manual/323850.pdf),
> also handle family 6 model 0x1f.
>
> What remains open is the question whether pv_cpuid() shouldn't also
> consume these masks.
What is your opinion on this, Keir?
Jan
* Re: [PATCH] x86: fix pv cpuid masking
From: Keir Fraser @ 2010-06-16 13:29 UTC
To: Jan Beulich; +Cc: xen-devel@lists.xensource.com
On 16/06/2010 14:21, "Jan Beulich" <JBeulich@novell.com> wrote:
>> What remains open is the question whether pv_cpuid() shouldn't also
>> consume these masks.
>
> What is your opinion on this, Keir?
Libxc/xc_cpuid_x86.c already applies the policy expressed in a domain's
config file, plus its own internal policy, on top of physical host CPUID.
Since host CPUID is already affected by the above-mentioned masks, I don't
think anything further needs to be done.
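Roughly, that layering looks like the sketch below (invented names for
illustration, not libxc's actual interfaces): each layer can only clear
feature bits on top of what the layer below already exposes, and the
boot-time MSR masks form the lowest layer.

    /* Hypothetical illustration of the CPUID policy layering; the names
     * are invented for this sketch and do not match libxc's real API. */
    #include <stdio.h>

    /* What CPUID.1:ECX reports on the host -- already reduced by the
     * cpuid_mask_ecx= boot option via the masking MSRs. */
    static unsigned int host_cpuid1_ecx(void)
    {
        return 0x0008e3fd;  /* example value, cf. the E8400 comment in intel.c */
    }

    int main(void)
    {
        unsigned int toolstack_policy = ~0u & ~(1u << 21); /* e.g. hide x2APIC  */
        unsigned int domain_config    = ~0u;               /* from the guest cfg */

        /* Each layer can only remove bits, never reinstate masked ones. */
        unsigned int guest_ecx = host_cpuid1_ecx()
                                 & toolstack_policy
                                 & domain_config;

        printf("guest CPUID.1:ECX = %#010x\n", guest_ecx);
        return 0;
    }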
-- Keir