* [PATCH v2 0/4] Introductory cleanup for CPUID phase 2 work
From: Andrew Cooper @ 2016-12-05 18:24 UTC
To: Xen-devel; +Cc: Andrew Cooper
This is some cleanup intended to ease further development work. There is no
practical change from a guest's point of view.
Andrew Cooper (4):
x86/vpmu: Move vpmu_do_cpuid() handling into {pv,hvm}_cpuid()
x86/vpmu: Remove core2_no_vpmu_ops
x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
x86/hvm: Move hvm_hypervisor_cpuid_leaf() handling into cpuid_hypervisor_leaves()
xen/arch/x86/cpu/vpmu.c | 16 +++-------
xen/arch/x86/cpu/vpmu_amd.c | 2 +-
xen/arch/x86/cpu/vpmu_intel.c | 74 +------------------------------------------
xen/arch/x86/hvm/emulate.c | 2 +-
xen/arch/x86/hvm/hvm.c | 72 +++++++++++++++++++++++++----------------
xen/arch/x86/hvm/svm/svm.c | 39 ++---------------------
xen/arch/x86/hvm/vmx/vmx.c | 52 ++----------------------------
xen/arch/x86/traps.c | 69 +++++++++++++++++++++++++++++-----------
xen/include/asm-x86/hvm/hvm.h | 10 ------
xen/include/asm-x86/vpmu.h | 12 +++----
10 files changed, 113 insertions(+), 235 deletions(-)
--
2.1.4
* [PATCH v2 1/4] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid()
From: Andrew Cooper @ 2016-12-05 18:24 UTC
To: Xen-devel; +Cc: Andrew Cooper, Boris Ostrovsky
This reduces the net complexity of CPUID handling by having all adjustments in
at the same place. Remove the now-unused vpmu_do_cpuid() infrastructure.
This involves introducing a vpmu_enabled() predicate, and making the
Intel-specific VPMU_CPU_HAS_* constants public.
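(To illustrate the leaf 0xa clamp introduced below: only the PMU version in
the bottom byte of EAX is capped, leaving the remaining feature fields
untouched. A standalone sketch, not Xen code; the helper name is made up.)

    /* Hypothetical helper modelling the clamp: cap the architectural
     * PMU version (CPUID.0xa:EAX[7:0]) at 3, the highest version
     * currently emulated. */
    static unsigned int clamp_pmu_version(unsigned int eax)
    {
        if ( (eax & 0xff) > 3 )
            eax = (eax & ~0xffu) | 3;
        return eax;
    }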
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
xen/arch/x86/cpu/vpmu.c | 10 ---------
xen/arch/x86/cpu/vpmu_intel.c | 52 -------------------------------------------
xen/arch/x86/hvm/hvm.c | 23 +++++++++++++++++++
xen/arch/x86/hvm/vmx/vmx.c | 2 --
xen/arch/x86/traps.c | 26 +++++++++++++++++-----
xen/include/asm-x86/vpmu.h | 10 ++++-----
6 files changed, 48 insertions(+), 75 deletions(-)
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 2f9ddf6..a542f4d 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -347,16 +347,6 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
}
}
-void vpmu_do_cpuid(unsigned int input,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- struct vpmu_struct *vpmu = vcpu_vpmu(current);
-
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_cpuid )
- vpmu->arch_vpmu_ops->do_cpuid(input, eax, ebx, ecx, edx);
-}
-
static void vpmu_save_force(void *arg)
{
struct vcpu *v = (struct vcpu *)arg;
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index e8049ed..e3f25c8 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -68,10 +68,6 @@
#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0))
static bool_t __read_mostly full_width_write;
-/* Intel-specific VPMU features */
-#define VPMU_CPU_HAS_DS 0x100 /* Has Debug Store */
-#define VPMU_CPU_HAS_BTS 0x200 /* Has Branch Trace Store */
-
/*
* MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
* counters. 4 bits for every counter.
@@ -782,33 +778,6 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
return 0;
}
-static void core2_vpmu_do_cpuid(unsigned int input,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- switch ( input )
- {
- case 0x1:
-
- if ( vpmu_is_set(vcpu_vpmu(current), VPMU_CPU_HAS_DS) )
- {
- /* Switch on the 'Debug Store' feature in CPUID.EAX[1]:EDX[21] */
- *edx |= cpufeat_mask(X86_FEATURE_DS);
- if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
- *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
- if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
- *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
- }
- break;
-
- case 0xa:
- /* Report at most version 3 since that's all we currently emulate */
- if ( MASK_EXTR(*eax, PMU_VERSION_MASK) > 3 )
- *eax = (*eax & ~PMU_VERSION_MASK) | MASK_INSR(3, PMU_VERSION_MASK);
- break;
- }
-}
-
/* Dump vpmu info on console, called in the context of keyhandler 'q'. */
static void core2_vpmu_dump(const struct vcpu *v)
{
@@ -900,32 +869,12 @@ struct arch_vpmu_ops core2_vpmu_ops = {
.do_wrmsr = core2_vpmu_do_wrmsr,
.do_rdmsr = core2_vpmu_do_rdmsr,
.do_interrupt = core2_vpmu_do_interrupt,
- .do_cpuid = core2_vpmu_do_cpuid,
.arch_vpmu_destroy = core2_vpmu_destroy,
.arch_vpmu_save = core2_vpmu_save,
.arch_vpmu_load = core2_vpmu_load,
.arch_vpmu_dump = core2_vpmu_dump
};
-static void core2_no_vpmu_do_cpuid(unsigned int input,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- /*
- * As in this case the vpmu is not enabled reset some bits in the
- * architectural performance monitoring related part.
- */
- if ( input == 0xa )
- {
- *eax &= ~PMU_VERSION_MASK;
- *eax &= ~PMU_GENERAL_NR_MASK;
- *eax &= ~PMU_GENERAL_WIDTH_MASK;
-
- *edx &= ~PMU_FIXED_NR_MASK;
- *edx &= ~PMU_FIXED_WIDTH_MASK;
- }
-}
-
/*
* If its a vpmu msr set it to 0.
*/
@@ -943,7 +892,6 @@ static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
*/
struct arch_vpmu_ops core2_no_vpmu_ops = {
.do_rdmsr = core2_no_vpmu_do_rdmsr,
- .do_cpuid = core2_no_vpmu_do_cpuid,
};
int vmx_vpmu_initialise(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7763798..1c35dde 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3516,6 +3516,17 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
*edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
}
+
+ if ( vpmu_enabled(v) &&
+ vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
+ {
+ *edx |= cpufeat_mask(X86_FEATURE_DS);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
+ *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
+ *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
+ }
+
break;
case 0x7:
@@ -3646,6 +3657,18 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
}
break;
+ case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
+ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || !vpmu_enabled(v) )
+ {
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+
+ /* Report at most version 3 since that's all we currently emulate */
+ if ( (*eax & 0xff) > 3 )
+ *eax = (*eax & ~0xff) | 3;
+ break;
+
case 0x80000001:
*ecx &= hvm_featureset[FEATURESET_e1c];
*edx &= hvm_featureset[FEATURESET_e1d];
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index afde634..505cdea 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2362,8 +2362,6 @@ static void vmx_cpuid_intercept(
break;
}
- vpmu_do_cpuid(input, eax, ebx, ecx, edx);
-
HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
}
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index d8b68e1..48ac519 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1192,6 +1192,16 @@ void pv_cpuid(struct cpu_user_regs *regs)
}
}
+ if ( vpmu_enabled(curr) &&
+ vpmu_is_set(vcpu_vpmu(curr), VPMU_CPU_HAS_DS) )
+ {
+ d |= cpufeat_mask(X86_FEATURE_DS);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
+ c |= cpufeat_mask(X86_FEATURE_DTES64);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
+ c |= cpufeat_mask(X86_FEATURE_DSCPL);
+ }
+
c |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
break;
@@ -1224,6 +1234,16 @@ void pv_cpuid(struct cpu_user_regs *regs)
a = 0;
break;
+ case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
+ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+ !vpmu_enabled(curr) )
+ goto unsupported;
+
+ /* Report at most version 3 since that's all we currently emulate. */
+ if ( (a & 0xff) > 3 )
+ a = (a & ~0xff) | 3;
+ break;
+
case XSTATE_CPUID:
if ( !is_control_domain(currd) && !is_hardware_domain(currd) )
@@ -1329,9 +1349,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
b &= pv_featureset[FEATURESET_e8b];
break;
- case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
- break;
-
case 0x00000005: /* MONITOR/MWAIT */
case 0x0000000b: /* Extended Topology Enumeration */
case 0x8000000a: /* SVM revision and features */
@@ -1344,9 +1361,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
}
out:
- /* VPMU may decide to modify some of the leaves */
- vpmu_do_cpuid(leaf, &a, &b, &c, &d);
-
regs->eax = a;
regs->ebx = b;
regs->ecx = c;
diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h
index ed9ec07..d1dda4b 100644
--- a/xen/include/asm-x86/vpmu.h
+++ b/xen/include/asm-x86/vpmu.h
@@ -25,6 +25,7 @@
#define vcpu_vpmu(vcpu) (&(vcpu)->arch.vpmu)
#define vpmu_vcpu(vpmu) container_of((vpmu), struct vcpu, arch.vpmu)
+#define vpmu_enabled(vcpu) vpmu_is_set(vcpu_vpmu(vcpu), VPMU_CONTEXT_ALLOCATED)
#define MSR_TYPE_COUNTER 0
#define MSR_TYPE_CTRL 1
@@ -42,9 +43,6 @@ struct arch_vpmu_ops {
uint64_t supported);
int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
int (*do_interrupt)(struct cpu_user_regs *regs);
- void (*do_cpuid)(unsigned int input,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx);
void (*arch_vpmu_destroy)(struct vcpu *v);
int (*arch_vpmu_save)(struct vcpu *v, bool_t to_guest);
int (*arch_vpmu_load)(struct vcpu *v, bool_t from_guest);
@@ -77,6 +75,10 @@ struct vpmu_struct {
/* PV(H) guests: VPMU registers are accessed by guest from shared page */
#define VPMU_CACHED 0x40
+/* Intel-specific VPMU features */
+#define VPMU_CPU_HAS_DS 0x100 /* Has Debug Store */
+#define VPMU_CPU_HAS_BTS 0x200 /* Has Branch Trace Store */
+
static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask)
{
vpmu->flags |= mask;
@@ -103,8 +105,6 @@ void vpmu_lvtpc_update(uint32_t val);
int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
uint64_t supported, bool_t is_write);
void vpmu_do_interrupt(struct cpu_user_regs *regs);
-void vpmu_do_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx);
void vpmu_initialise(struct vcpu *v);
void vpmu_destroy(struct vcpu *v);
void vpmu_save(struct vcpu *v);
--
2.1.4
* [PATCH v2 2/4] x86/vpmu: Remove core2_no_vpmu_ops
From: Andrew Cooper @ 2016-12-05 18:24 UTC
To: Xen-devel; +Cc: Andrew Cooper, Boris Ostrovsky, Jan Beulich
core2_no_vpmu_ops exists solely to work around the default-leaking of CPUID/MSR
values in Xen.
With CPUID handling removed from arch_vpmu_ops, the RDMSR handling is the last
remaining hook. Since core2_no_vpmu_ops's introduction in c/s 25250ed7 "vpmu
intel: Add cpuid handling when vpmu disabled", a lot of work has been done and
the nop path in vpmu_do_msr() now suffices.
vpmu_do_msr() also falls into the nop path for un-configured or unprivileged
domains, which enables the removal of the duplicate logic in priv_op_read_msr().
Finally, make all arch_vpmu_ops structures const as they are never modified,
and make them static as they are not referred to externally.
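(For reference, the nop path relied on above amounts to the following
simplified model. vpmu_msr_nop() is a made-up name, not a Xen function.)

    /* Sketch of vpmu_do_msr()'s nop path: with the vPMU unconfigured,
     * or the hardware domain profiling the whole system, reads yield
     * zero and writes are dropped, giving the same net effect that
     * core2_no_vpmu_do_rdmsr() used to provide. */
    static int vpmu_msr_nop(uint64_t *msr_content, bool_t is_write)
    {
        if ( !is_write )
            *msr_content = 0;
        return 0;
    }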
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
v2:
* s/const static/static const/
* Tweak comment
---
xen/arch/x86/cpu/vpmu.c | 6 +++++-
xen/arch/x86/cpu/vpmu_amd.c | 2 +-
xen/arch/x86/cpu/vpmu_intel.c | 22 +---------------------
xen/arch/x86/traps.c | 6 +-----
xen/include/asm-x86/vpmu.h | 2 +-
5 files changed, 9 insertions(+), 29 deletions(-)
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index a542f4d..4c713be 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -136,9 +136,13 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
const struct arch_vpmu_ops *ops;
int ret = 0;
+ /*
+ * Hide the PMU MSRs if vpmu is not configured, or the hardware domain is
+ * profiling the whole system.
+ */
if ( likely(vpmu_mode == XENPMU_MODE_OFF) ||
((vpmu_mode & XENPMU_MODE_ALL) &&
- !is_hardware_domain(current->domain)) )
+ !is_hardware_domain(curr->domain)) )
goto nop;
vpmu = vcpu_vpmu(curr);
diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 55d03b3..43ade13 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -488,7 +488,7 @@ static void amd_vpmu_dump(const struct vcpu *v)
}
}
-struct arch_vpmu_ops amd_vpmu_ops = {
+static const struct arch_vpmu_ops amd_vpmu_ops = {
.do_wrmsr = amd_vpmu_do_wrmsr,
.do_rdmsr = amd_vpmu_do_rdmsr,
.do_interrupt = amd_vpmu_do_interrupt,
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index e3f25c8..e51bc4e 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -865,7 +865,7 @@ static void core2_vpmu_destroy(struct vcpu *v)
vpmu_clear(vpmu);
}
-struct arch_vpmu_ops core2_vpmu_ops = {
+static const struct arch_vpmu_ops core2_vpmu_ops = {
.do_wrmsr = core2_vpmu_do_wrmsr,
.do_rdmsr = core2_vpmu_do_rdmsr,
.do_interrupt = core2_vpmu_do_interrupt,
@@ -875,32 +875,12 @@ struct arch_vpmu_ops core2_vpmu_ops = {
.arch_vpmu_dump = core2_vpmu_dump
};
-/*
- * If its a vpmu msr set it to 0.
- */
-static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
-{
- int type = -1, index = -1;
- if ( !is_core2_vpmu_msr(msr, &type, &index) )
- return -EINVAL;
- *msr_content = 0;
- return 0;
-}
-
-/*
- * These functions are used in case vpmu is not enabled.
- */
-struct arch_vpmu_ops core2_no_vpmu_ops = {
- .do_rdmsr = core2_no_vpmu_do_rdmsr,
-};
-
int vmx_vpmu_initialise(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
u64 msr_content;
static bool_t ds_warned;
- vpmu->arch_vpmu_ops = &core2_no_vpmu_ops;
if ( vpmu_mode == XENPMU_MODE_OFF )
return 0;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 48ac519..638d8ff 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2566,11 +2566,7 @@ static int priv_op_read_msr(unsigned int reg, uint64_t *val,
case MSR_K7_EVNTSEL0...MSR_K7_PERFCTR3:
if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
{
- /* Don't leak PMU MSRs to unprivileged domains. */
- if ( (vpmu_mode & XENPMU_MODE_ALL) &&
- !is_hardware_domain(currd) )
- *val = 0;
- else if ( vpmu_do_rdmsr(reg, val) )
+ if ( vpmu_do_rdmsr(reg, val) )
break;
return X86EMUL_OKAY;
}
diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h
index d1dda4b..e50618f 100644
--- a/xen/include/asm-x86/vpmu.h
+++ b/xen/include/asm-x86/vpmu.h
@@ -60,7 +60,7 @@ struct vpmu_struct {
u32 hw_lapic_lvtpc;
void *context; /* May be shared with PV guest */
void *priv_context; /* hypervisor-only */
- struct arch_vpmu_ops *arch_vpmu_ops;
+ const struct arch_vpmu_ops *arch_vpmu_ops;
struct xen_pmu_data *xenpmu_data;
spinlock_t vpmu_lock;
};
--
2.1.4
* [PATCH v2 3/4] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
From: Andrew Cooper @ 2016-12-05 18:24 UTC
To: Xen-devel
Cc: Kevin Tian, Jan Beulich, Andrew Cooper, Jun Nakajima,
Boris Ostrovsky, Suravee Suthikulpanit
This reduces the net complexity of CPUID handling by having all adjustments in
at the same place. Remove the now-unused hvm_funcs.cpuid_intercept
infrastructure.
The SYSCALL feature hiding is tweaked when moved. In principle, an
administrator can choose to explicitly hide the SYSCALL feature from the
guest, as it has a separate feature bit. If this is the case, the feature
shouldn't be set behind the back of the administrator's wishes. (Not that many
64bit OSes would function in this scenario.) In reality, SYSCALL will always
be set in edx at this point.
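(To make the tweak concrete, the old and new logic compare roughly as
follows; this is paraphrased from the hunks below, not additional code.)

    /* Old vmx_cpuid_intercept(): force the bit to track the current
     * mode, overriding any administrator-chosen policy. */
    if ( hvm_long_mode_enabled(v) )
        *edx |= cpufeat_mask(X86_FEATURE_SYSCALL);
    else
        *edx &= ~cpufeat_mask(X86_FEATURE_SYSCALL);

    /* New hvm_cpuid(): only ever hide the bit outside long mode on
     * Intel; never set it behind the administrator's back. */
    if ( d->arch.x86_vendor == X86_VENDOR_INTEL &&
         !hvm_long_mode_enabled(v) )
        *edx &= ~cpufeat_mask(X86_FEATURE_SYSCALL);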
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
v2:
* Update commit message to explain the difference with SYSCALL handling.
* Entirely exclude leaf 0x8000001c outside of AMD hardware.
---
xen/arch/x86/hvm/emulate.c | 2 +-
xen/arch/x86/hvm/hvm.c | 27 +++++++++++++++++++++------
xen/arch/x86/hvm/svm/svm.c | 39 ++-------------------------------------
xen/arch/x86/hvm/vmx/vmx.c | 31 ++-----------------------------
xen/include/asm-x86/hvm/hvm.h | 3 ---
5 files changed, 26 insertions(+), 76 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index d0a043b..efae4df 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1565,7 +1565,7 @@ int hvmemul_cpuid(
hvm_check_cpuid_faulting(current) )
return X86EMUL_EXCEPTION;
- hvm_funcs.cpuid_intercept(eax, ebx, ecx, edx);
+ hvm_cpuid(*eax, eax, ebx, ecx, edx);
return X86EMUL_OKAY;
}
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 1c35dde..652a496 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -906,12 +906,7 @@ const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
ASSERT(v->domain == current->domain);
hvm_cpuid(0x80000000, &level, NULL, NULL, NULL);
if ( (level >> 16) == 0x8000 && level > 0x80000000 )
- {
- unsigned int dummy;
-
- level = 0x80000001;
- hvm_funcs.cpuid_intercept(&level, &dummy, &ext1_ecx, &ext1_edx);
- }
+ hvm_cpuid(0x80000001, NULL, NULL, &ext1_ecx, &ext1_edx);
}
else
{
@@ -3702,6 +3697,12 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
*edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
}
+
+ /* SYSCALL is hidden outside of long mode on Intel. */
+ if ( d->arch.x86_vendor == X86_VENDOR_INTEL &&
+ !hvm_long_mode_enabled(v))
+ *edx &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
+
break;
case 0x80000007:
@@ -3726,6 +3727,20 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
*ebx &= hvm_featureset[FEATURESET_e8b];
break;
+
+ case 0x8000001c:
+ if ( !cpu_has_svm )
+ {
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+
+ if ( cpu_has_lwp && (v->arch.xcr0 & XSTATE_LWP) )
+ /* Turn on available bit and other features specified in lwp_cfg. */
+ *eax = (*edx & v->arch.hvm_svm.guest_lwp_cfg) | 1;
+ else
+ *eax = 0;
+ break;
}
}
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 1588b2f..df087d9 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1565,41 +1565,6 @@ static void svm_fpu_dirty_intercept(void)
vmcb_set_cr0(vmcb, vmcb_get_cr0(vmcb) & ~X86_CR0_TS);
}
-static void svm_cpuid_intercept(
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- unsigned int input = *eax;
- struct vcpu *v = current;
-
- hvm_cpuid(input, eax, ebx, ecx, edx);
-
- switch (input) {
- case 0x8000001c:
- {
- /* LWP capability CPUID */
- uint64_t lwp_cfg = v->arch.hvm_svm.guest_lwp_cfg;
-
- if ( cpu_has_lwp )
- {
- if ( !(v->arch.xcr0 & XSTATE_LWP) )
- {
- *eax = 0x0;
- break;
- }
-
- /* turn on available bit and other features specified in lwp_cfg */
- *eax = (*edx & lwp_cfg) | 0x00000001;
- }
- break;
- }
- default:
- break;
- }
-
- HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
-}
-
static void svm_vmexit_do_cpuid(struct cpu_user_regs *regs)
{
unsigned int eax, ebx, ecx, edx, inst_len;
@@ -1612,7 +1577,8 @@ static void svm_vmexit_do_cpuid(struct cpu_user_regs *regs)
ecx = regs->ecx;
edx = regs->edx;
- svm_cpuid_intercept(&eax, &ebx, &ecx, &edx);
+ hvm_cpuid(regs->_eax, &eax, &ebx, &ecx, &edx);
+ HVMTRACE_5D(CPUID, regs->_eax, eax, ebx, ecx, edx);
regs->eax = eax;
regs->ebx = ebx;
@@ -2244,7 +2210,6 @@ static struct hvm_function_table __initdata svm_function_table = {
.init_hypercall_page = svm_init_hypercall_page,
.event_pending = svm_event_pending,
.invlpg = svm_invlpg,
- .cpuid_intercept = svm_cpuid_intercept,
.wbinvd_intercept = svm_wbinvd_intercept,
.fpu_dirty_intercept = svm_fpu_dirty_intercept,
.msr_read_intercept = svm_msr_read_intercept,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 505cdea..499b300 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -73,9 +73,6 @@ static void vmx_install_vlapic_mapping(struct vcpu *v);
static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
static void vmx_update_guest_efer(struct vcpu *v);
static void vmx_update_guest_vendor(struct vcpu *v);
-static void vmx_cpuid_intercept(
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx);
static void vmx_wbinvd_intercept(void);
static void vmx_fpu_dirty_intercept(void);
static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
@@ -2105,7 +2102,6 @@ static struct hvm_function_table __initdata vmx_function_table = {
.invlpg = vmx_invlpg,
.cpu_up = vmx_cpu_up,
.cpu_down = vmx_cpu_down,
- .cpuid_intercept = vmx_cpuid_intercept,
.wbinvd_intercept = vmx_wbinvd_intercept,
.fpu_dirty_intercept = vmx_fpu_dirty_intercept,
.msr_read_intercept = vmx_msr_read_intercept,
@@ -2341,30 +2337,6 @@ static void vmx_fpu_dirty_intercept(void)
}
}
-static void vmx_cpuid_intercept(
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- unsigned int input = *eax;
- struct vcpu *v = current;
-
- hvm_cpuid(input, eax, ebx, ecx, edx);
-
- switch ( input )
- {
- case 0x80000001:
- /* SYSCALL is visible iff running in long mode. */
- if ( hvm_long_mode_enabled(v) )
- *edx |= cpufeat_mask(X86_FEATURE_SYSCALL);
- else
- *edx &= ~(cpufeat_mask(X86_FEATURE_SYSCALL));
-
- break;
- }
-
- HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
-}
-
static int vmx_do_cpuid(struct cpu_user_regs *regs)
{
unsigned int eax, ebx, ecx, edx;
@@ -2384,7 +2356,8 @@ static int vmx_do_cpuid(struct cpu_user_regs *regs)
leaf = regs->eax;
subleaf = regs->ecx;
- vmx_cpuid_intercept(&eax, &ebx, &ecx, &edx);
+ hvm_cpuid(leaf, &eax, &ebx, &ecx, &edx);
+ HVMTRACE_5D(CPUID, leaf, eax, ebx, ecx, edx);
regs->eax = eax;
regs->ebx = ebx;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index b37b335..1abe743 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -161,9 +161,6 @@ struct hvm_function_table {
unsigned int (*get_insn_bytes)(struct vcpu *v, uint8_t *buf);
/* Instruction intercepts: non-void return values are X86EMUL codes. */
- void (*cpuid_intercept)(
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx);
void (*wbinvd_intercept)(void);
void (*fpu_dirty_intercept)(void);
int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content);
--
2.1.4
* [PATCH v2 4/4] x86/hvm: Move hvm_hypervisor_cpuid_leaf() handling into cpuid_hypervisor_leaves()
From: Andrew Cooper @ 2016-12-05 18:24 UTC
To: Xen-devel; +Cc: Andrew Cooper, Kevin Tian, Jun Nakajima, Jan Beulich
This reduces the net complexity of CPUID handling by having all adjustments in
at the same place. Remove the now-unused hvm_funcs.hypervisor_cpuid_leaf()
infrastructure.
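(For context, a guest consumes this leaf at offset 4 from the Xen CPUID
base. A hypothetical guest-side probe is sketched here; cpuid() is an
assumed plain CPUID wrapper, not a real API.)

    uint32_t base = 0x40000000;  /* 0x40000100 if shifted past Viridian */
    uint32_t eax, ebx, ecx, edx, vcpu_id = 0;

    cpuid(base + 4, &eax, &ebx, &ecx, &edx);  /* HVM hypervisor leaf */

    if ( eax & XEN_HVM_CPUID_VCPU_ID_PRESENT )
        vcpu_id = ebx;  /* Xen's idea of this vCPU's id */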
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
v2:
* Reorder the position of the blanket zero
---
xen/arch/x86/hvm/hvm.c | 22 ----------------------
xen/arch/x86/hvm/vmx/vmx.c | 19 -------------------
xen/arch/x86/traps.c | 37 ++++++++++++++++++++++++++++++-------
xen/include/asm-x86/hvm/hvm.h | 7 -------
4 files changed, 30 insertions(+), 55 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 652a496..c5dfd6e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3401,28 +3401,6 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
return rc ? len : 0; /* fake a copy_from_user() return code */
}
-void hvm_hypervisor_cpuid_leaf(uint32_t sub_idx,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
-{
- *eax = *ebx = *ecx = *edx = 0;
- if ( hvm_funcs.hypervisor_cpuid_leaf )
- hvm_funcs.hypervisor_cpuid_leaf(sub_idx, eax, ebx, ecx, edx);
-
- if ( sub_idx == 0 )
- {
- /*
- * Indicate that memory mapped from other domains (either grants or
- * foreign pages) has valid IOMMU entries.
- */
- *eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
-
- /* Indicate presence of vcpu id and set it in ebx */
- *eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
- *ebx = current->vcpu_id;
- }
-}
-
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 499b300..350b945 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1908,24 +1908,6 @@ static void vmx_handle_eoi(u8 vector)
__vmwrite(GUEST_INTR_STATUS, status);
}
-void vmx_hypervisor_cpuid_leaf(uint32_t sub_idx,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
-{
- if ( sub_idx != 0 )
- return;
- if ( cpu_has_vmx_apic_reg_virt )
- *eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
- /*
- * We want to claim that x2APIC is virtualized if APIC MSR accesses are
- * not intercepted. When all three of these are true both rdmsr and wrmsr
- * in the guest will run without VMEXITs (see vmx_vlapic_msr_changed()).
- */
- if ( cpu_has_vmx_virtualize_x2apic_mode && cpu_has_vmx_apic_reg_virt &&
- cpu_has_vmx_virtual_intr_delivery )
- *eax |= XEN_HVM_CPUID_X2APIC_VIRT;
-}
-
static void vmx_enable_msr_interception(struct domain *d, uint32_t msr)
{
struct vcpu *v;
@@ -2126,7 +2108,6 @@ static struct hvm_function_table __initdata vmx_function_table = {
.sync_pir_to_irr = vmx_sync_pir_to_irr,
.handle_eoi = vmx_handle_eoi,
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
- .hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf,
.enable_msr_interception = vmx_enable_msr_interception,
.is_singlestep_supported = vmx_is_singlestep_supported,
.set_mode = vmx_set_mode,
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 638d8ff..5ddc408 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -907,7 +907,8 @@ int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val)
int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
- struct domain *currd = current->domain;
+ struct vcpu *curr = current;
+ struct domain *currd = curr->domain;
/* Optionally shift out of the way of Viridian architectural leaves. */
uint32_t base = is_viridian_domain(currd) ? 0x40000100 : 0x40000000;
uint32_t limit, dummy;
@@ -999,13 +1000,35 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
}
break;
- case 4:
- if ( !has_hvm_container_domain(currd) )
- {
- *eax = *ebx = *ecx = *edx = 0;
+ case 4: /* HVM hypervisor leaf. */
+ *eax = *ebx = *ecx = *edx = 0;
+
+ if ( !has_hvm_container_domain(currd) || sub_idx != 0 )
break;
- }
- hvm_hypervisor_cpuid_leaf(sub_idx, eax, ebx, ecx, edx);
+
+ if ( cpu_has_vmx_apic_reg_virt )
+ *eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
+
+ /*
+ * We want to claim that x2APIC is virtualized if APIC MSR accesses
+ * are not intercepted. When all three of these are true both rdmsr
+ * and wrmsr in the guest will run without VMEXITs (see
+ * vmx_vlapic_msr_changed()).
+ */
+ if ( cpu_has_vmx_virtualize_x2apic_mode &&
+ cpu_has_vmx_apic_reg_virt &&
+ cpu_has_vmx_virtual_intr_delivery )
+ *eax |= XEN_HVM_CPUID_X2APIC_VIRT;
+
+ /*
+ * Indicate that memory mapped from other domains (either grants or
+ * foreign pages) has valid IOMMU entries.
+ */
+ *eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
+
+ /* Indicate presence of vcpu id and set it in ebx */
+ *eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
+ *ebx = curr->vcpu_id;
break;
default:
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 1abe743..b89b209 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -198,10 +198,6 @@ struct hvm_function_table {
uint8_t *p2m_acc, bool_t access_r,
bool_t access_w, bool_t access_x);
- void (*hypervisor_cpuid_leaf)(uint32_t sub_idx,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx);
-
void (*enable_msr_interception)(struct domain *d, uint32_t msr);
bool_t (*is_singlestep_supported)(void);
int (*set_mode)(struct vcpu *v, int mode);
@@ -388,9 +384,6 @@ bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);
#define has_viridian_apic_assist(d) \
(is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist))
-void hvm_hypervisor_cpuid_leaf(uint32_t sub_idx,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx);
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
bool hvm_check_cpuid_faulting(struct vcpu *v);
--
2.1.4
* Re: [PATCH v2 1/4] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid()
From: Boris Ostrovsky @ 2016-12-05 20:59 UTC
To: Andrew Cooper, Xen-devel
On 12/05/2016 01:24 PM, Andrew Cooper wrote:
> @@ -3516,6 +3516,17 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
> if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
> *edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
> }
> +
> + if ( vpmu_enabled(v) &&
> + vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
> + {
> + *edx |= cpufeat_mask(X86_FEATURE_DS);
> + if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
> + *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
> + if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
> + *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
> + }
> +
> break;
>
> case 0x7:
> @@ -3646,6 +3657,18 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
> }
> break;
>
> + case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
> + if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || !vpmu_enabled(v) )
> + {
> + *eax = *ebx = *ecx = *edx = 0;
> + break;
> + }
> +
> + /* Report at most version 3 since that's all we currently emulate */
> + if ( (*eax & 0xff) > 3 )
> + *eax = (*eax & ~0xff) | 3;
> + break;
Both this and Debug Store checks are the same for both HVM and PV. Can
they be factored out? (and then perhaps version update can gain back
PMU_VERSION_MASK macro)
-boris
> @@ -1192,6 +1192,16 @@ void pv_cpuid(struct cpu_user_regs *regs)
> }
> }
>
> + if ( vpmu_enabled(curr) &&
> + vpmu_is_set(vcpu_vpmu(curr), VPMU_CPU_HAS_DS) )
> + {
> + d |= cpufeat_mask(X86_FEATURE_DS);
> + if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
> + c |= cpufeat_mask(X86_FEATURE_DTES64);
> + if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
> + c |= cpufeat_mask(X86_FEATURE_DSCPL);
> + }
> +
> c |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
> break;
>
> @@ -1224,6 +1234,16 @@ void pv_cpuid(struct cpu_user_regs *regs)
> a = 0;
> break;
>
> + case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
> + if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
> + !vpmu_enabled(curr) )
> + goto unsupported;
> +
> + /* Report at most version 3 since that's all we currently emulate. */
> + if ( (a & 0xff) > 3 )
> + a = (a & ~0xff) | 3;
> + break;
> +
>
* Re: [PATCH v2 2/4] x86/vpmu: Remove core2_no_vpmu_ops
From: Boris Ostrovsky @ 2016-12-05 21:02 UTC
To: Andrew Cooper, Xen-devel; +Cc: Jan Beulich
On 12/05/2016 01:24 PM, Andrew Cooper wrote:
> core2_no_vpmu_ops exists solely to work around the default-leaking of CPUID/MSR
> values in Xen.
>
> With CPUID handling removed from arch_vpmu_ops, the RDMSR handling is the last
> remaining hook. Since core2_no_vpmu_ops's introduction in c/s 25250ed7 "vpmu
> intel: Add cpuid handling when vpmu disabled", a lot of work has been done and
> the nop path in vpmu_do_msr() now suffices.
>
> vpmu_do_msr() also falls into the nop path for un-configured or unprivileged
> domains, which enables the removal of the duplicate logic in priv_op_read_msr().
>
> Finally, make all arch_vpmu_ops structures const as they are never modified,
> and make them static as they are not referred to externally.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
* Re: [PATCH v2 3/4] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
From: Boris Ostrovsky @ 2016-12-05 21:13 UTC
To: Andrew Cooper, Xen-devel
Cc: Suravee Suthikulpanit, Kevin Tian, Jun Nakajima, Jan Beulich
On 12/05/2016 01:24 PM, Andrew Cooper wrote:
> This reduces the net complexity of CPUID handling by having all adjustments in
> at the same place. Remove the now-unused hvm_funcs.cpuid_intercept
> infrastructure.
>
> The SYSCALL feature hiding is tweaked when moved. In principle, an
> administrator can choose to explicitly hide the SYSCALL feature from the
> guest, as it has a separate feature bit. If this is the case, the feature
> shouldn't be set behind the back of the administrator's wishes. (Not that many
> 64bit OSes would function in this scenario.) In reality, SYSCALL will always
> be set in edx at this point.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
* Re: [PATCH v2 3/4] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
From: Tian, Kevin @ 2016-12-06 6:08 UTC
To: Andrew Cooper, Xen-devel
Cc: Suravee Suthikulpanit, Boris Ostrovsky, Nakajima, Jun,
Jan Beulich
> From: Andrew Cooper [mailto:andrew.cooper3@citrix.com]
> Sent: Tuesday, December 06, 2016 2:25 AM
>
> This reduces the net complexity of CPUID handling by having all adjustments in
remove 'in'
> at the same place. Remove the now-unused hvm_funcs.cpuid_intercept
> infrastructure.
>
> The SYSCALL feature hiding is tweaked when moved. In principle, an
> administrator can choose to explicitly hide the SYSCALL feature from the
> guest, as it has a separate feature bit. If this is the case, the feature
> shouldn't be set behind the back of the administrator's wishes. (Not that many
> 64bit OSes would function in this scenario.) In reality, SYSCALL will always
> be set in edx at this point.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
* Re: [PATCH v2 4/4] x86/hvm: Move hvm_hypervisor_cpuid_leaf() handling into cpuid_hypervisor_leaves()
From: Tian, Kevin @ 2016-12-06 6:11 UTC
To: Andrew Cooper, Xen-devel; +Cc: Nakajima, Jun, Jan Beulich
> From: Andrew Cooper [mailto:andrew.cooper3@citrix.com]
> Sent: Tuesday, December 06, 2016 2:25 AM
>
> This reduces the net complexity of CPUID handling by having all adjustments in
> at the same place. Remove the now-unused hvm_funcs.hypervisor_cpuid_leaf()
> infrastructure.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> ---
> CC: Jan Beulich <JBeulich@suse.com>
> CC: Jun Nakajima <jun.nakajima@intel.com>
> CC: Kevin Tian <kevin.tian@intel.com>
>
Acked-by: Kevin Tian <kevin.tian@intel.com>
* Re: [PATCH v2 2/4] x86/vpmu: Remove core2_no_vpmu_ops
From: Jan Beulich @ 2016-12-06 8:53 UTC
To: Andrew Cooper; +Cc: Boris Ostrovsky, Xen-devel
>>> On 05.12.16 at 19:24, <andrew.cooper3@citrix.com> wrote:
> core2_no_vpmu_ops exists solely to work around the default-leaking of CPUID/MSR
> values in Xen.
>
> With CPUID handling removed from arch_vpmu_ops, the RDMSR handling is the last
> remaining hook. Since core2_no_vpmu_ops's introduction in c/s 25250ed7 "vpmu
> intel: Add cpuid handling when vpmu disabled", a lot of work has been done and
> the nop path in vpmu_do_msr() now suffices.
>
> vpmu_do_msr() also falls into the nop path for un-configured or unprivileged
> domains, which enables the removal of the duplicate logic in priv_op_read_msr().
>
> Finally, make all arch_vpmu_ops structures const as they are never modified,
> and make them static as they are not referred to externally.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Acked-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
* Re: [PATCH v2 3/4] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
From: Jan Beulich @ 2016-12-06 8:59 UTC
To: Andrew Cooper
Cc: Boris Ostrovsky, Kevin Tian, Jun Nakajima, Suravee Suthikulpanit,
Xen-devel
>>> On 05.12.16 at 19:24, <andrew.cooper3@citrix.com> wrote:
> This reduces the net complexity of CPUID handling by having all adjustments in
> at the same place. Remove the now-unused hvm_funcs.cpuid_intercept
> infrastructure.
>
> The SYSCALL feature hiding is tweaked when moved. In principle, an
> administrator can choose to explicitly hide the SYSCALL feature from the
> guest, as it has a separate feature bit. If this is the case, the feature
> shouldn't be set behind the back of the administrator's wishes. (Not that many
> 64bit OSes would function in this scenario.) In reality, SYSCALL will always
> be set in edx at this point.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
* Re: [PATCH v2 4/4] x86/hvm: Move hvm_hypervisor_cpuid_leaf() handling into cpuid_hypervisor_leaves()
From: Jan Beulich @ 2016-12-06 9:02 UTC
To: Andrew Cooper; +Cc: Kevin Tian, Jun Nakajima, Xen-devel
>>> On 05.12.16 at 19:24, <andrew.cooper3@citrix.com> wrote:
> This reduces the net complexity of CPUID handling by having all adjustments in
> at the same place. Remove the now-unused hvm_funcs.hypervisor_cpuid_leaf()
> infrastructure.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
* Re: [PATCH v2 1/4] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid()
From: Andrew Cooper @ 2016-12-06 11:11 UTC
To: Boris Ostrovsky, Xen-devel
On 05/12/16 20:59, Boris Ostrovsky wrote:
> On 12/05/2016 01:24 PM, Andrew Cooper wrote:
>> @@ -3516,6 +3516,17 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>> if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
>> *edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
>> }
>> +
>> + if ( vpmu_enabled(v) &&
>> + vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
>> + {
>> + *edx |= cpufeat_mask(X86_FEATURE_DS);
>> + if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
>> + *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
>> + if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
>> + *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
>> + }
>> +
>> break;
>>
>> case 0x7:
>> @@ -3646,6 +3657,18 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>> }
>> break;
>>
>> + case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
>> + if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || !vpmu_enabled(v) )
>> + {
>> + *eax = *ebx = *ecx = *edx = 0;
>> + break;
>> + }
>> +
>> + /* Report at most version 3 since that's all we currently emulate */
>> + if ( (*eax & 0xff) > 3 )
>> + *eax = (*eax & ~0xff) | 3;
>> + break;
>
> Both this and Debug Store checks are the same for both HVM and PV. Can
> they be factored out?
The purpose of this patch series is to untangle the current call tree to
make it easier to finally merge the PV and HVM paths into a single
guest_cpuid().
Yes, this does add a bit of duplication in the short term, but allows
for easier movement to the long-term goal.
> (and then perhaps version update can gain back PMU_VERSION_MASK macro)
That involves moving a whole load of Intel internals with generic names
into vpmu.h, which is why I chose not to do it. The end result in
guest_cpuid() won't use it.
~Andrew
* Re: [PATCH v2 1/4] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid()
From: Andrew Cooper @ 2016-12-09 18:15 UTC
To: Boris Ostrovsky, Xen-devel
On 06/12/16 11:11, Andrew Cooper wrote:
> On 05/12/16 20:59, Boris Ostrovsky wrote:
>> On 12/05/2016 01:24 PM, Andrew Cooper wrote:
>>> @@ -3516,6 +3516,17 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>>> if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
>>> *edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
>>> }
>>> +
>>> + if ( vpmu_enabled(v) &&
>>> + vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
>>> + {
>>> + *edx |= cpufeat_mask(X86_FEATURE_DS);
>>> + if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
>>> + *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
>>> + if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
>>> + *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
>>> + }
>>> +
>>> break;
>>>
>>> case 0x7:
>>> @@ -3646,6 +3657,18 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>>> }
>>> break;
>>>
>>> + case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
>>> + if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || !vpmu_enabled(v) )
>>> + {
>>> + *eax = *ebx = *ecx = *edx = 0;
>>> + break;
>>> + }
>>> +
>>> + /* Report at most version 3 since that's all we currently emulate */
>>> + if ( (*eax & 0xff) > 3 )
>>> + *eax = (*eax & ~0xff) | 3;
>>> + break;
>> Both this and Debug Store checks are the same for both HVM and PV. Can
>> they be factored out?
> The purpose of this patch series is to untangle the current call tree to
> make it easier to finally merge the PV and HVM paths into a single
> guest_cpuid().
>
> Yes, this does add a bit of duplication in the short term, but allows
> for easier movement to the long-term goal.
>
>> (and then perhaps version update can gain back PMU_VERSION_MASK macro)
> That involves moving a whole load of Intel internals with generic names
> into vpmu.h, which is why I chose not to do it. The end result in
> guest_cpuid() won't use it.
Boris: Any further comment, or is my explanation ok? Strictly speaking
the patch isn't blocked on your review, but I'd prefer not to use that
technicality if you are unhappy with it.
~Andrew
* Re: [PATCH v2 1/4] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid()
From: Boris Ostrovsky @ 2016-12-09 18:28 UTC
To: Andrew Cooper, Xen-devel
On 12/09/2016 01:15 PM, Andrew Cooper wrote:
> On 06/12/16 11:11, Andrew Cooper wrote:
>> On 05/12/16 20:59, Boris Ostrovsky wrote:
>>> On 12/05/2016 01:24 PM, Andrew Cooper wrote:
>>>> @@ -3516,6 +3516,17 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>>>> if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
>>>> *edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
>>>> }
>>>> +
>>>> + if ( vpmu_enabled(v) &&
>>>> + vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
>>>> + {
>>>> + *edx |= cpufeat_mask(X86_FEATURE_DS);
>>>> + if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
>>>> + *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
>>>> + if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
>>>> + *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
>>>> + }
>>>> +
>>>> break;
>>>>
>>>> case 0x7:
>>>> @@ -3646,6 +3657,18 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>>>> }
>>>> break;
>>>>
>>>> + case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
>>>> + if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || !vpmu_enabled(v) )
>>>> + {
>>>> + *eax = *ebx = *ecx = *edx = 0;
>>>> + break;
>>>> + }
>>>> +
>>>> + /* Report at most version 3 since that's all we currently emulate */
>>>> + if ( (*eax & 0xff) > 3 )
>>>> + *eax = (*eax & ~0xff) | 3;
>>>> + break;
>>> Both this and Debug Store checks are the same for both HVM and PV. Can
>>> they be factored out?
>> The purpose of this patch series is to untangle the current call tree to
>> make it easier to finally merge the PV and HVM paths into a single
>> guest_cpuid().
>>
>> Yes, this does add a bit of duplication in the short term, but allows
>> for easier movement to the long-term goal.
>>
>>> (and then perhaps version update can gain back PMU_VERSION_MASK macro)
>> That involves moving a whole load of Intel internals with generic names
>> into vpmu.h, which is why I chose not to do it. The end result in
>> guest_cpuid() won't use it.
> Boris: Any further comment, or is my explanation ok? Strictly speaking
> the patch isn't blocked on your review, but I'd prefer not to use that
> technicality if you are unhappy with it.
Since in the end both of these concerns will be addressed by the
guest_cpuid() implementation, I think it's all good.
Thanks.
-boris