xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
* [PATCH for-4.9 0/6] Introductory cleanup for CPUID phase 2 work
@ 2016-11-16 12:31 Andrew Cooper
  2016-11-16 12:31 ` [PATCH 1/6] xen/x86: Add a helper to calculate family/model/stepping information Andrew Cooper
                   ` (5 more replies)
  0 siblings, 6 replies; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 12:31 UTC (permalink / raw)
  To: Xen-devel; +Cc: Andrew Cooper

This is some cleanup intended to ease further development work.  There is no
practical change from a guest's point of view.

Andrew Cooper (6):
  xen/x86: Add a helper to calculate family/model/stepping information
  x86/vpmu: Move vpmu_do_cpuid() handling into {pv,hvm}_cpuid()
  x86/vpmu: Remove core2_no_vpmu_ops
  x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
  x86/time: Move cpuid_time_leaf() handling into cpuid_hypervisor_leaves()
  x86/hvm: Move hvm_hypervisor_cpuid_leaf() handling into cpuid_hypervisor_leaves()

 xen/arch/x86/cpu/common.c       |  36 ++++++++------
 xen/arch/x86/cpu/vpmu.c         |  13 +----
 xen/arch/x86/cpu/vpmu_amd.c     |   2 +-
 xen/arch/x86/cpu/vpmu_intel.c   |  74 +---------------------------
 xen/arch/x86/domctl.c           |   7 +--
 xen/arch/x86/hvm/emulate.c      |   2 +-
 xen/arch/x86/hvm/hvm.c          |  66 ++++++++++++++-----------
 xen/arch/x86/hvm/svm/svm.c      |  39 +--------------
 xen/arch/x86/hvm/vmx/vmx.c      |  52 +-------------------
 xen/arch/x86/time.c             |  36 --------------
 xen/arch/x86/traps.c            | 106 +++++++++++++++++++++++++++++++++-------
 xen/include/asm-x86/hvm/hvm.h   |  10 ----
 xen/include/asm-x86/processor.h |   2 +
 xen/include/asm-x86/time.h      |   3 --
 xen/include/asm-x86/vpmu.h      |  12 ++---
 15 files changed, 166 insertions(+), 294 deletions(-)

-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH 1/6] xen/x86: Add a helper to calculate family/model/stepping information
  2016-11-16 12:31 [PATCH for-4.9 0/6] Introductory cleanup for CPUID phase 2 work Andrew Cooper
@ 2016-11-16 12:31 ` Andrew Cooper
  2016-11-16 12:49   ` Jan Beulich
  2016-11-16 12:31 ` [PATCH 2/6] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid() Andrew Cooper
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 12:31 UTC (permalink / raw)
  To: Xen-devel; +Cc: Andrew Cooper, Jan Beulich

And replace the existing opencoded calculations.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
---
 xen/arch/x86/cpu/common.c       | 36 ++++++++++++++++++++++--------------
 xen/arch/x86/domctl.c           |  7 +------
 xen/include/asm-x86/processor.h |  2 ++
 3 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 3475198..e41a069 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -183,6 +183,25 @@ int get_cpu_vendor(const char v[], enum get_cpu_vendor mode)
 	return X86_VENDOR_UNKNOWN;
 }
 
+u8 get_cpu_family(uint32_t raw, u8 *model, u8 *stepping)
+{
+	u8 fam, mod;
+
+	fam = (raw >> 8) & 0xf;
+	if (fam == 0xf)
+		fam += (raw >> 20) & 0xff;
+
+	mod = (raw >> 4) & 0xf;
+	if (fam >= 0x6)
+		mod |= (raw >> 12) & 0xf0;
+
+	if ( model )
+		*model = mod;
+	if ( stepping )
+		*stepping = raw & 0xf;
+	return fam;
+}
+
 static inline u32 _phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;
@@ -222,13 +241,8 @@ static void __init early_cpu_detect(void)
 	c->x86_vendor = get_cpu_vendor(c->x86_vendor_id, gcv_host);
 
 	cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
-	c->x86 = (eax >> 8) & 15;
-	c->x86_model = (eax >> 4) & 15;
-	if (c->x86 == 0xf)
-		c->x86 += (eax >> 20) & 0xff;
-	if (c->x86 >= 0x6)
-		c->x86_model += ((eax >> 16) & 0xF) << 4;
-	c->x86_mask = eax & 15;
+	c->x86 = get_cpu_family(eax, &c->x86_model, &c->x86_mask);
+
 	edx &= ~cleared_caps[cpufeat_word(X86_FEATURE_FPU)];
 	ecx &= ~cleared_caps[cpufeat_word(X86_FEATURE_SSE3)];
 	if (edx & cpufeat_mask(X86_FEATURE_CLFLUSH))
@@ -273,13 +287,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
 
 	/* Model and family information. */
 	cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
-	c->x86 = (eax >> 8) & 15;
-	c->x86_model = (eax >> 4) & 15;
-	if (c->x86 == 0xf)
-		c->x86 += (eax >> 20) & 0xff;
-	if (c->x86 >= 0x6)
-		c->x86_model += ((eax >> 16) & 0xF) << 4;
-	c->x86_mask = eax & 15;
+	c->x86 = get_cpu_family(eax, &c->x86_model, &c->x86_mask);
 	c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
 	c->phys_proc_id = c->apicid;
 
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 2a2fe04..ab9ad39 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -82,12 +82,7 @@ static void update_domain_cpuid_info(struct domain *d,
     }
 
     case 1:
-        d->arch.x86 = (ctl->eax >> 8) & 0xf;
-        if ( d->arch.x86 == 0xf )
-            d->arch.x86 += (ctl->eax >> 20) & 0xff;
-        d->arch.x86_model = (ctl->eax >> 4) & 0xf;
-        if ( d->arch.x86 >= 0x6 )
-            d->arch.x86_model |= (ctl->eax >> 12) & 0xf0;
+        d->arch.x86 = get_cpu_family(ctl->eax, &d->arch.x86_model, NULL);
 
         if ( is_pv_domain(d) && ((levelling_caps & LCAP_1cd) == LCAP_1cd) )
         {
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 6378afd..c3c2afa 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -627,6 +627,8 @@ enum get_cpu_vendor {
 };
 
 int get_cpu_vendor(const char vendor_id[], enum get_cpu_vendor);
+u8 get_cpu_family(uint32_t raw, u8 *model, u8 *stepping);
+
 void pv_cpuid(struct cpu_user_regs *regs);
 
 #endif /* !__ASSEMBLY__ */
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 2/6] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid()
  2016-11-16 12:31 [PATCH for-4.9 0/6] Introductory cleanup for CPUID phase 2 work Andrew Cooper
  2016-11-16 12:31 ` [PATCH 1/6] xen/x86: Add a helper to calculate family/model/stepping information Andrew Cooper
@ 2016-11-16 12:31 ` Andrew Cooper
  2016-11-16 12:53   ` Jan Beulich
  2016-11-16 12:31 ` [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops Andrew Cooper
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 12:31 UTC (permalink / raw)
  To: Xen-devel
  Cc: Kevin Tian, Andrew Cooper, Boris Ostrovsky, Jun Nakajima,
	Jan Beulich

This reduces the net complexity of CPUID handling by having all adjustments in
the same place.  Remove the now-unused vpmu_do_cpuid() infrastructure.

This involves introducing a vpmu_enabled() predicate, and making the Intel
specific VPMU_CPU_HAS_* constants public.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/x86/cpu/vpmu.c       | 10 ---------
 xen/arch/x86/cpu/vpmu_intel.c | 52 -------------------------------------------
 xen/arch/x86/hvm/hvm.c        | 23 +++++++++++++++++++
 xen/arch/x86/hvm/vmx/vmx.c    |  2 --
 xen/arch/x86/traps.c          | 26 +++++++++++++++++-----
 xen/include/asm-x86/vpmu.h    | 10 ++++-----
 6 files changed, 48 insertions(+), 75 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 2f9ddf6..a542f4d 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -347,16 +347,6 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
     }
 }
 
-void vpmu_do_cpuid(unsigned int input,
-                   unsigned int *eax, unsigned int *ebx,
-                   unsigned int *ecx, unsigned int *edx)
-{
-    struct vpmu_struct *vpmu = vcpu_vpmu(current);
-
-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_cpuid )
-        vpmu->arch_vpmu_ops->do_cpuid(input, eax, ebx, ecx, edx);
-}
-
 static void vpmu_save_force(void *arg)
 {
     struct vcpu *v = (struct vcpu *)arg;
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index e8049ed..e3f25c8 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -68,10 +68,6 @@
 #define MSR_PMC_ALIAS_MASK       (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0))
 static bool_t __read_mostly full_width_write;
 
-/* Intel-specific VPMU features */
-#define VPMU_CPU_HAS_DS                     0x100 /* Has Debug Store */
-#define VPMU_CPU_HAS_BTS                    0x200 /* Has Branch Trace Store */
-
 /*
  * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
  * counters. 4 bits for every counter.
@@ -782,33 +778,6 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
     return 0;
 }
 
-static void core2_vpmu_do_cpuid(unsigned int input,
-                                unsigned int *eax, unsigned int *ebx,
-                                unsigned int *ecx, unsigned int *edx)
-{
-    switch ( input )
-    {
-    case 0x1:
-
-        if ( vpmu_is_set(vcpu_vpmu(current), VPMU_CPU_HAS_DS) )
-        {
-            /* Switch on the 'Debug Store' feature in CPUID.EAX[1]:EDX[21] */
-            *edx |= cpufeat_mask(X86_FEATURE_DS);
-            if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
-                *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
-            if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
-                *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
-        }
-        break;
-
-    case 0xa:
-        /* Report at most version 3 since that's all we currently emulate */
-        if ( MASK_EXTR(*eax, PMU_VERSION_MASK) > 3 )
-            *eax = (*eax & ~PMU_VERSION_MASK) | MASK_INSR(3, PMU_VERSION_MASK);
-        break;
-    }
-}
-
 /* Dump vpmu info on console, called in the context of keyhandler 'q'. */
 static void core2_vpmu_dump(const struct vcpu *v)
 {
@@ -900,32 +869,12 @@ struct arch_vpmu_ops core2_vpmu_ops = {
     .do_wrmsr = core2_vpmu_do_wrmsr,
     .do_rdmsr = core2_vpmu_do_rdmsr,
     .do_interrupt = core2_vpmu_do_interrupt,
-    .do_cpuid = core2_vpmu_do_cpuid,
     .arch_vpmu_destroy = core2_vpmu_destroy,
     .arch_vpmu_save = core2_vpmu_save,
     .arch_vpmu_load = core2_vpmu_load,
     .arch_vpmu_dump = core2_vpmu_dump
 };
 
-static void core2_no_vpmu_do_cpuid(unsigned int input,
-                                unsigned int *eax, unsigned int *ebx,
-                                unsigned int *ecx, unsigned int *edx)
-{
-    /*
-     * As in this case the vpmu is not enabled reset some bits in the
-     * architectural performance monitoring related part.
-     */
-    if ( input == 0xa )
-    {
-        *eax &= ~PMU_VERSION_MASK;
-        *eax &= ~PMU_GENERAL_NR_MASK;
-        *eax &= ~PMU_GENERAL_WIDTH_MASK;
-
-        *edx &= ~PMU_FIXED_NR_MASK;
-        *edx &= ~PMU_FIXED_WIDTH_MASK;
-    }
-}
-
 /*
  * If its a vpmu msr set it to 0.
  */
@@ -943,7 +892,6 @@ static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
  */
 struct arch_vpmu_ops core2_no_vpmu_ops = {
     .do_rdmsr = core2_no_vpmu_do_rdmsr,
-    .do_cpuid = core2_no_vpmu_do_cpuid,
 };
 
 int vmx_vpmu_initialise(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 704fd64..afd14c4 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3491,6 +3491,17 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
             if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
                 *edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
         }
+
+        if ( vpmu_enabled(v) &&
+             vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
+        {
+            *edx |= cpufeat_mask(X86_FEATURE_DS);
+            if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
+                *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
+            if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
+                *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
+        }
+
         break;
 
     case 0x7:
@@ -3620,6 +3631,18 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
         }
         break;
 
+    case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || !vpmu_enabled(v) )
+        {
+            *eax = *ebx = *ecx = *edx = 0;
+            break;
+        }
+
+        /* Report at most version 3 since that's all we currently emulate */
+        if ( (*eax & 0xff) > 3 )
+            *eax = (*eax & ~0xff) | 3;
+        break;
+
     case 0x80000001:
         *ecx &= hvm_featureset[FEATURESET_e1c];
         *edx &= hvm_featureset[FEATURESET_e1d];
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index a18db28..2238d74 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2423,8 +2423,6 @@ static void vmx_cpuid_intercept(
             break;
     }
 
-    vpmu_do_cpuid(input, eax, ebx, ecx, edx);
-
     HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
 }
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index d56d76e..59e6b16 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1120,6 +1120,16 @@ void pv_cpuid(struct cpu_user_regs *regs)
             }
         }
 
+        if ( vpmu_enabled(curr) &&
+             vpmu_is_set(vcpu_vpmu(curr), VPMU_CPU_HAS_DS) )
+        {
+            d |= cpufeat_mask(X86_FEATURE_DS);
+            if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
+                c |= cpufeat_mask(X86_FEATURE_DTES64);
+            if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
+                c |= cpufeat_mask(X86_FEATURE_DSCPL);
+        }
+
         c |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
         break;
 
@@ -1151,6 +1161,16 @@ void pv_cpuid(struct cpu_user_regs *regs)
         a = d = 0;
         break;
 
+    case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+             !vpmu_enabled(curr) )
+            goto unsupported;
+
+        /* Report at most version 3 since that's all we currently emulate. */
+        if ( (a & 0xff) > 3 )
+            a = (a & ~0xff) | 3;
+        break;
+
     case XSTATE_CPUID:
 
         if ( !is_control_domain(currd) && !is_hardware_domain(currd) )
@@ -1256,9 +1276,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
         b &= pv_featureset[FEATURESET_e8b];
         break;
 
-    case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
-        break;
-
     case 0x00000005: /* MONITOR/MWAIT */
     case 0x0000000b: /* Extended Topology Enumeration */
     case 0x8000000a: /* SVM revision and features */
@@ -1271,9 +1288,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
     }
 
  out:
-    /* VPMU may decide to modify some of the leaves */
-    vpmu_do_cpuid(leaf, &a, &b, &c, &d);
-
     regs->eax = a;
     regs->ebx = b;
     regs->ecx = c;
diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h
index ed9ec07..d1dda4b 100644
--- a/xen/include/asm-x86/vpmu.h
+++ b/xen/include/asm-x86/vpmu.h
@@ -25,6 +25,7 @@
 
 #define vcpu_vpmu(vcpu)   (&(vcpu)->arch.vpmu)
 #define vpmu_vcpu(vpmu)   container_of((vpmu), struct vcpu, arch.vpmu)
+#define vpmu_enabled(vcpu) vpmu_is_set(vcpu_vpmu(vcpu), VPMU_CONTEXT_ALLOCATED)
 
 #define MSR_TYPE_COUNTER            0
 #define MSR_TYPE_CTRL               1
@@ -42,9 +43,6 @@ struct arch_vpmu_ops {
                     uint64_t supported);
     int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
     int (*do_interrupt)(struct cpu_user_regs *regs);
-    void (*do_cpuid)(unsigned int input,
-                     unsigned int *eax, unsigned int *ebx,
-                     unsigned int *ecx, unsigned int *edx);
     void (*arch_vpmu_destroy)(struct vcpu *v);
     int (*arch_vpmu_save)(struct vcpu *v, bool_t to_guest);
     int (*arch_vpmu_load)(struct vcpu *v, bool_t from_guest);
@@ -77,6 +75,10 @@ struct vpmu_struct {
 /* PV(H) guests: VPMU registers are accessed by guest from shared page */
 #define VPMU_CACHED                         0x40
 
+/* Intel-specific VPMU features */
+#define VPMU_CPU_HAS_DS                     0x100 /* Has Debug Store */
+#define VPMU_CPU_HAS_BTS                    0x200 /* Has Branch Trace Store */
+
 static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask)
 {
     vpmu->flags |= mask;
@@ -103,8 +105,6 @@ void vpmu_lvtpc_update(uint32_t val);
 int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
                 uint64_t supported, bool_t is_write);
 void vpmu_do_interrupt(struct cpu_user_regs *regs);
-void vpmu_do_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
-                                       unsigned int *ecx, unsigned int *edx);
 void vpmu_initialise(struct vcpu *v);
 void vpmu_destroy(struct vcpu *v);
 void vpmu_save(struct vcpu *v);
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops
  2016-11-16 12:31 [PATCH for-4.9 0/6] Introductory cleanup for CPUID phase 2 work Andrew Cooper
  2016-11-16 12:31 ` [PATCH 1/6] xen/x86: Add a helper to calculate family/model/stepping information Andrew Cooper
  2016-11-16 12:31 ` [PATCH 2/6] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid() Andrew Cooper
@ 2016-11-16 12:31 ` Andrew Cooper
  2016-11-16 13:09   ` Jan Beulich
                     ` (2 more replies)
  2016-11-16 12:31 ` [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid() Andrew Cooper
                   ` (2 subsequent siblings)
  5 siblings, 3 replies; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 12:31 UTC (permalink / raw)
  To: Xen-devel
  Cc: Kevin Tian, Andrew Cooper, Boris Ostrovsky, Jun Nakajima,
	Jan Beulich

core2_no_vpmu_ops exists solely to work around the default-leaking of CPUID/MSR
values in Xen.

With CPUID handling removed from arch_vpmu_ops, the RDMSR handling is the last
remaining hook.  Since core2_no_vpmu_ops's introduction in c/s 25250ed7 "vpmu
intel: Add cpuid handling when vpmu disabled", a lot of work has been done and
the nop path in vpmu_do_msr() now suffices.

vpmu_do_msr() also falls into the nop path for un-configured or unprivileged
domains, which enables the removal of the duplicate logic in priv_op_read_msr().

Finally, make all arch_vpmu_ops structures const as they are never modified,
and make them static as they are not referred to externally.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/x86/cpu/vpmu.c       |  3 ++-
 xen/arch/x86/cpu/vpmu_amd.c   |  2 +-
 xen/arch/x86/cpu/vpmu_intel.c | 22 +---------------------
 xen/arch/x86/traps.c          |  6 +-----
 xen/include/asm-x86/vpmu.h    |  2 +-
 5 files changed, 6 insertions(+), 29 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index a542f4d..1f822ca 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -136,9 +136,10 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
     const struct arch_vpmu_ops *ops;
     int ret = 0;
 
+    /* Don't leak PMU MSRs to unprivileged domains. */
     if ( likely(vpmu_mode == XENPMU_MODE_OFF) ||
          ((vpmu_mode & XENPMU_MODE_ALL) &&
-          !is_hardware_domain(current->domain)) )
+          !is_hardware_domain(curr->domain)) )
          goto nop;
 
     vpmu = vcpu_vpmu(curr);
diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 55d03b3..bae81a9 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -488,7 +488,7 @@ static void amd_vpmu_dump(const struct vcpu *v)
     }
 }
 
-struct arch_vpmu_ops amd_vpmu_ops = {
+const static struct arch_vpmu_ops amd_vpmu_ops = {
     .do_wrmsr = amd_vpmu_do_wrmsr,
     .do_rdmsr = amd_vpmu_do_rdmsr,
     .do_interrupt = amd_vpmu_do_interrupt,
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index e3f25c8..babe4cb 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -865,7 +865,7 @@ static void core2_vpmu_destroy(struct vcpu *v)
     vpmu_clear(vpmu);
 }
 
-struct arch_vpmu_ops core2_vpmu_ops = {
+const static struct arch_vpmu_ops core2_vpmu_ops = {
     .do_wrmsr = core2_vpmu_do_wrmsr,
     .do_rdmsr = core2_vpmu_do_rdmsr,
     .do_interrupt = core2_vpmu_do_interrupt,
@@ -875,32 +875,12 @@ struct arch_vpmu_ops core2_vpmu_ops = {
     .arch_vpmu_dump = core2_vpmu_dump
 };
 
-/*
- * If its a vpmu msr set it to 0.
- */
-static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
-{
-    int type = -1, index = -1;
-    if ( !is_core2_vpmu_msr(msr, &type, &index) )
-        return -EINVAL;
-    *msr_content = 0;
-    return 0;
-}
-
-/*
- * These functions are used in case vpmu is not enabled.
- */
-struct arch_vpmu_ops core2_no_vpmu_ops = {
-    .do_rdmsr = core2_no_vpmu_do_rdmsr,
-};
-
 int vmx_vpmu_initialise(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     u64 msr_content;
     static bool_t ds_warned;
 
-    vpmu->arch_vpmu_ops = &core2_no_vpmu_ops;
     if ( vpmu_mode == XENPMU_MODE_OFF )
         return 0;
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 59e6b16..cdce72d 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2532,11 +2532,7 @@ static int priv_op_read_msr(unsigned int reg, uint64_t *val,
     case MSR_K7_EVNTSEL0...MSR_K7_PERFCTR3:
             if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
             {
-                /* Don't leak PMU MSRs to unprivileged domains. */
-                if ( (vpmu_mode & XENPMU_MODE_ALL) &&
-                     !is_hardware_domain(currd) )
-                    *val = 0;
-                else if ( vpmu_do_rdmsr(reg, val) )
+                if ( vpmu_do_rdmsr(reg, val) )
                     break;
                 return X86EMUL_OKAY;
             }
diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h
index d1dda4b..e50618f 100644
--- a/xen/include/asm-x86/vpmu.h
+++ b/xen/include/asm-x86/vpmu.h
@@ -60,7 +60,7 @@ struct vpmu_struct {
     u32 hw_lapic_lvtpc;
     void *context;      /* May be shared with PV guest */
     void *priv_context; /* hypervisor-only */
-    struct arch_vpmu_ops *arch_vpmu_ops;
+    const struct arch_vpmu_ops *arch_vpmu_ops;
     struct xen_pmu_data *xenpmu_data;
     spinlock_t vpmu_lock;
 };
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
  2016-11-16 12:31 [PATCH for-4.9 0/6] Introductory cleanup for CPUID phase 2 work Andrew Cooper
                   ` (2 preceding siblings ...)
  2016-11-16 12:31 ` [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops Andrew Cooper
@ 2016-11-16 12:31 ` Andrew Cooper
  2016-11-16 15:20   ` Jan Beulich
  2016-11-16 16:40   ` Boris Ostrovsky
  2016-11-16 12:31 ` [PATCH 5/6] x86/time: Move cpuid_time_leaf() handling into cpuid_hypervisor_leaves() Andrew Cooper
  2016-11-16 12:31 ` [PATCH 6/6] x86/hvm: Move hvm_hypervisor_cpuid_leaf() " Andrew Cooper
  5 siblings, 2 replies; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 12:31 UTC (permalink / raw)
  To: Xen-devel
  Cc: Kevin Tian, Jan Beulich, Andrew Cooper, Jun Nakajima,
	Boris Ostrovsky, Suravee Suthikulpanit

This reduces the net complexity of CPUID handling by having all adjustments in
the same place.  Remove the now-unused hvm_funcs.cpuid_intercept
infrastructure.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 xen/arch/x86/hvm/emulate.c    |  2 +-
 xen/arch/x86/hvm/hvm.c        | 21 +++++++++++++++------
 xen/arch/x86/hvm/svm/svm.c    | 39 ++-------------------------------------
 xen/arch/x86/hvm/vmx/vmx.c    | 31 ++-----------------------------
 xen/include/asm-x86/hvm/hvm.h |  3 ---
 5 files changed, 20 insertions(+), 76 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index e9b8f8c..540458c 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1558,7 +1558,7 @@ int hvmemul_cpuid(
          hvm_check_cpuid_faulting(current) )
         return X86EMUL_EXCEPTION;
 
-    hvm_funcs.cpuid_intercept(eax, ebx, ecx, edx);
+    hvm_cpuid(*eax, eax, ebx, ecx, edx);
     return X86EMUL_OKAY;
 }
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index afd14c4..862ab76 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -906,12 +906,7 @@ const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
         ASSERT(v->domain == current->domain);
         hvm_cpuid(0x80000000, &level, NULL, NULL, NULL);
         if ( (level >> 16) == 0x8000 && level > 0x80000000 )
-        {
-            unsigned int dummy;
-
-            level = 0x80000001;
-            hvm_funcs.cpuid_intercept(&level, &dummy, &ext1_ecx, &ext1_edx);
-        }
+            hvm_cpuid(0x80000001, NULL, NULL, &ext1_ecx, &ext1_edx);
     }
     else
     {
@@ -3676,6 +3671,12 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
             if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
                 *edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
         }
+
+        /* SYSCALL is hidden outside of long mode on Intel. */
+        if ( d->arch.x86_vendor == X86_VENDOR_INTEL &&
+             !hvm_long_mode_enabled(v))
+            *edx &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
+
         break;
 
     case 0x80000007:
@@ -3700,6 +3701,14 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
 
         *ebx &= hvm_featureset[FEATURESET_e8b];
         break;
+
+    case 0x8000001c:
+        if ( !(v->arch.xcr0 & XSTATE_LWP) )
+            *eax = 0;
+        else if ( cpu_has_svm && cpu_has_lwp )
+            /* Turn on available bit and other features specified in lwp_cfg. */
+            *eax = (*edx & v->arch.hvm_svm.guest_lwp_cfg) | 1;
+        break;
     }
 }
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c9dbbea..0668717 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1562,41 +1562,6 @@ static void svm_fpu_dirty_intercept(void)
         vmcb_set_cr0(vmcb, vmcb_get_cr0(vmcb) & ~X86_CR0_TS);
 }
 
-static void svm_cpuid_intercept(
-    unsigned int *eax, unsigned int *ebx,
-    unsigned int *ecx, unsigned int *edx)
-{
-    unsigned int input = *eax;
-    struct vcpu *v = current;
-
-    hvm_cpuid(input, eax, ebx, ecx, edx);
-
-    switch (input) {
-    case 0x8000001c: 
-    {
-        /* LWP capability CPUID */
-        uint64_t lwp_cfg = v->arch.hvm_svm.guest_lwp_cfg;
-
-        if ( cpu_has_lwp )
-        {
-            if ( !(v->arch.xcr0 & XSTATE_LWP) )
-           {
-                *eax = 0x0;
-                break;
-            }
-
-            /* turn on available bit and other features specified in lwp_cfg */
-            *eax = (*edx & lwp_cfg) | 0x00000001;
-        }
-        break;
-    }
-    default:
-        break;
-    }
-
-    HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
-}
-
 static void svm_vmexit_do_cpuid(struct cpu_user_regs *regs)
 {
     unsigned int eax, ebx, ecx, edx, inst_len;
@@ -1609,7 +1574,8 @@ static void svm_vmexit_do_cpuid(struct cpu_user_regs *regs)
     ecx = regs->ecx;
     edx = regs->edx;
 
-    svm_cpuid_intercept(&eax, &ebx, &ecx, &edx);
+    hvm_cpuid(regs->_eax, &eax, &ebx, &ecx, &edx);
+    HVMTRACE_5D(CPUID, regs->_eax, eax, ebx, ecx, edx);
 
     regs->eax = eax;
     regs->ebx = ebx;
@@ -2253,7 +2219,6 @@ static struct hvm_function_table __initdata svm_function_table = {
     .init_hypercall_page  = svm_init_hypercall_page,
     .event_pending        = svm_event_pending,
     .invlpg               = svm_invlpg,
-    .cpuid_intercept      = svm_cpuid_intercept,
     .wbinvd_intercept     = svm_wbinvd_intercept,
     .fpu_dirty_intercept  = svm_fpu_dirty_intercept,
     .msr_read_intercept   = svm_msr_read_intercept,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2238d74..5a73076 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -73,9 +73,6 @@ static void vmx_install_vlapic_mapping(struct vcpu *v);
 static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
 static void vmx_update_guest_efer(struct vcpu *v);
 static void vmx_update_guest_vendor(struct vcpu *v);
-static void vmx_cpuid_intercept(
-    unsigned int *eax, unsigned int *ebx,
-    unsigned int *ecx, unsigned int *edx);
 static void vmx_wbinvd_intercept(void);
 static void vmx_fpu_dirty_intercept(void);
 static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
@@ -2166,7 +2163,6 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .invlpg               = vmx_invlpg,
     .cpu_up               = vmx_cpu_up,
     .cpu_down             = vmx_cpu_down,
-    .cpuid_intercept      = vmx_cpuid_intercept,
     .wbinvd_intercept     = vmx_wbinvd_intercept,
     .fpu_dirty_intercept  = vmx_fpu_dirty_intercept,
     .msr_read_intercept   = vmx_msr_read_intercept,
@@ -2402,30 +2398,6 @@ static void vmx_fpu_dirty_intercept(void)
     }
 }
 
-static void vmx_cpuid_intercept(
-    unsigned int *eax, unsigned int *ebx,
-    unsigned int *ecx, unsigned int *edx)
-{
-    unsigned int input = *eax;
-    struct vcpu *v = current;
-
-    hvm_cpuid(input, eax, ebx, ecx, edx);
-
-    switch ( input )
-    {
-        case 0x80000001:
-            /* SYSCALL is visible iff running in long mode. */
-            if ( hvm_long_mode_enabled(v) )
-                *edx |= cpufeat_mask(X86_FEATURE_SYSCALL);
-            else
-                *edx &= ~(cpufeat_mask(X86_FEATURE_SYSCALL));
-
-            break;
-    }
-
-    HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
-}
-
 static int vmx_do_cpuid(struct cpu_user_regs *regs)
 {
     unsigned int eax, ebx, ecx, edx;
@@ -2445,7 +2417,8 @@ static int vmx_do_cpuid(struct cpu_user_regs *regs)
     leaf = regs->eax;
     subleaf = regs->ecx;
 
-    vmx_cpuid_intercept(&eax, &ebx, &ecx, &edx);
+    hvm_cpuid(leaf, &eax, &ebx, &ecx, &edx);
+    HVMTRACE_5D(CPUID, leaf, eax, ebx, ecx, edx);
 
     regs->eax = eax;
     regs->ebx = ebx;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 7e7462e..8e366c0 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -169,9 +169,6 @@ struct hvm_function_table {
     unsigned int (*get_insn_bytes)(struct vcpu *v, uint8_t *buf);
 
     /* Instruction intercepts: non-void return values are X86EMUL codes. */
-    void (*cpuid_intercept)(
-        unsigned int *eax, unsigned int *ebx,
-        unsigned int *ecx, unsigned int *edx);
     void (*wbinvd_intercept)(void);
     void (*fpu_dirty_intercept)(void);
     int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content);
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 5/6] x86/time: Move cpuid_time_leaf() handling into cpuid_hypervisor_leaves()
  2016-11-16 12:31 [PATCH for-4.9 0/6] Introductory cleanup for CPUID phase 2 work Andrew Cooper
                   ` (3 preceding siblings ...)
  2016-11-16 12:31 ` [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid() Andrew Cooper
@ 2016-11-16 12:31 ` Andrew Cooper
  2016-11-16 15:47   ` Jan Beulich
  2016-11-16 12:31 ` [PATCH 6/6] x86/hvm: Move hvm_hypervisor_cpuid_leaf() " Andrew Cooper
  5 siblings, 1 reply; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 12:31 UTC (permalink / raw)
  To: Xen-devel; +Cc: Andrew Cooper, Jan Beulich

This reduces the net complexity of CPUID handling by having all adjustments
in the same place.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
---
 xen/arch/x86/time.c        | 36 ------------------------------------
 xen/arch/x86/traps.c       | 40 +++++++++++++++++++++++++++++++++++++---
 xen/include/asm-x86/time.h |  3 ---
 3 files changed, 37 insertions(+), 42 deletions(-)

diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index dda89d8..79707df 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1938,42 +1938,6 @@ int host_tsc_is_safe(void)
     return boot_cpu_has(X86_FEATURE_TSC_RELIABLE);
 }
 
-void cpuid_time_leaf(uint32_t sub_idx, uint32_t *eax, uint32_t *ebx,
-                      uint32_t *ecx, uint32_t *edx)
-{
-    struct domain *d = current->domain;
-    uint64_t offset;
-
-    switch ( sub_idx )
-    {
-    case 0: /* features */
-        *eax = (!!d->arch.vtsc << 0) |
-               (!!host_tsc_is_safe() << 1) |
-               (!!boot_cpu_has(X86_FEATURE_RDTSCP) << 2);
-        *ebx = d->arch.tsc_mode;
-        *ecx = d->arch.tsc_khz;
-        *edx = d->arch.incarnation;
-        break;
-    case 1: /* scale and offset */
-        if ( !d->arch.vtsc )
-            offset = d->arch.vtsc_offset;
-        else
-            /* offset already applied to value returned by virtual rdtscp */
-            offset = 0;
-        *eax = (uint32_t)offset;
-        *ebx = (uint32_t)(offset >> 32);
-        *ecx = d->arch.vtsc_to_ns.mul_frac;
-        *edx = (s8)d->arch.vtsc_to_ns.shift;
-        break;
-    case 2: /* physical cpu_khz */
-        *eax = cpu_khz;
-        *ebx = *ecx = *edx = 0;
-        break;
-    default:
-        *eax = *ebx = *ecx = *edx = 0;
-    }
-}
-
 /*
  * called to collect tsc-related data only for save file or live
  * migrate; called after last rdtsc is done on this incarnation
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index cdce72d..9be9fe3 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -922,9 +922,43 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
             *ecx |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD;
         break;
 
-    case 3:
-        *eax = *ebx = *ecx = *edx = 0;
-        cpuid_time_leaf( sub_idx, eax, ebx, ecx, edx );
+    case 3: /* Time leaf. */
+        switch ( sub_idx )
+        {
+        case 0: /* features */
+            *eax = ((!!currd->arch.vtsc << 0) |
+                    (!!host_tsc_is_safe() << 1) |
+                    (!!boot_cpu_has(X86_FEATURE_RDTSCP) << 2));
+            *ebx = currd->arch.tsc_mode;
+            *ecx = currd->arch.tsc_khz;
+            *edx = currd->arch.incarnation;
+            break;
+
+        case 1: /* scale and offset */
+        {
+            uint64_t offset;
+
+            if ( !currd->arch.vtsc )
+                offset = currd->arch.vtsc_offset;
+            else
+                /* offset already applied to value returned by virtual rdtscp */
+                offset = 0;
+            *eax = (uint32_t)offset;
+            *ebx = (uint32_t)(offset >> 32);
+            *ecx = currd->arch.vtsc_to_ns.mul_frac;
+            *edx = (s8)currd->arch.vtsc_to_ns.shift;
+            break;
+        }
+
+        case 2: /* physical cpu_khz */
+            *eax = cpu_khz;
+            *ebx = *ecx = *edx = 0;
+            break;
+
+        default:
+            *eax = *ebx = *ecx = *edx = 0;
+            break;
+        }
         break;
 
     case 4:
diff --git a/xen/include/asm-x86/time.h b/xen/include/asm-x86/time.h
index 6d704b4..ef989a6 100644
--- a/xen/include/asm-x86/time.h
+++ b/xen/include/asm-x86/time.h
@@ -71,9 +71,6 @@ void force_update_vcpu_system_time(struct vcpu *v);
 
 bool clocksource_is_tsc(void);
 int host_tsc_is_safe(void);
-void cpuid_time_leaf(uint32_t sub_idx, uint32_t *eax, uint32_t *ebx,
-                     uint32_t *ecx, uint32_t *edx);
-
 u64 stime2tsc(s_time_t stime);
 
 struct time_scale;
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 6/6] x86/hvm: Move hvm_hypervisor_cpuid_leaf() handling into cpuid_hypervisor_leaves()
  2016-11-16 12:31 [PATCH for-4.9 0/6] Introductory cleanup for CPUID phase 2 work Andrew Cooper
                   ` (4 preceding siblings ...)
  2016-11-16 12:31 ` [PATCH 5/6] x86/time: Move cpuid_time_leaf() handling into cpuid_hypervisor_leaves() Andrew Cooper
@ 2016-11-16 12:31 ` Andrew Cooper
  2016-11-16 15:53   ` Jan Beulich
  5 siblings, 1 reply; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 12:31 UTC (permalink / raw)
  To: Xen-devel; +Cc: Andrew Cooper, Kevin Tian, Jun Nakajima, Jan Beulich

This reduces the net complexity of CPUID handling by having all adjustments
in the same place.  Remove the now-unused hvm_funcs.hypervisor_cpuid_leaf()
infrastructure.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/x86/hvm/hvm.c        | 22 ----------------------
 xen/arch/x86/hvm/vmx/vmx.c    | 19 -------------------
 xen/arch/x86/traps.c          | 34 ++++++++++++++++++++++++++++++----
 xen/include/asm-x86/hvm/hvm.h |  7 -------
 4 files changed, 30 insertions(+), 52 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 862ab76..5ae8270 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3376,28 +3376,6 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
     return rc ? len : 0; /* fake a copy_from_user() return code */
 }
 
-void hvm_hypervisor_cpuid_leaf(uint32_t sub_idx,
-                               uint32_t *eax, uint32_t *ebx,
-                               uint32_t *ecx, uint32_t *edx)
-{
-    *eax = *ebx = *ecx = *edx = 0;
-    if ( hvm_funcs.hypervisor_cpuid_leaf )
-        hvm_funcs.hypervisor_cpuid_leaf(sub_idx, eax, ebx, ecx, edx);
-
-    if ( sub_idx == 0 )
-    {
-        /*
-         * Indicate that memory mapped from other domains (either grants or
-         * foreign pages) has valid IOMMU entries.
-         */
-        *eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
-
-        /* Indicate presence of vcpu id and set it in ebx */
-        *eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
-        *ebx = current->vcpu_id;
-    }
-}
-
 void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                                    unsigned int *ecx, unsigned int *edx)
 {
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 5a73076..2a5551d 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1969,24 +1969,6 @@ static void vmx_handle_eoi(u8 vector)
     __vmwrite(GUEST_INTR_STATUS, status);
 }
 
-void vmx_hypervisor_cpuid_leaf(uint32_t sub_idx,
-                               uint32_t *eax, uint32_t *ebx,
-                               uint32_t *ecx, uint32_t *edx)
-{
-    if ( sub_idx != 0 )
-        return;
-    if ( cpu_has_vmx_apic_reg_virt )
-        *eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
-    /*
-     * We want to claim that x2APIC is virtualized if APIC MSR accesses are
-     * not intercepted. When all three of these are true both rdmsr and wrmsr
-     * in the guest will run without VMEXITs (see vmx_vlapic_msr_changed()).
-     */
-    if ( cpu_has_vmx_virtualize_x2apic_mode && cpu_has_vmx_apic_reg_virt &&
-         cpu_has_vmx_virtual_intr_delivery )
-        *eax |= XEN_HVM_CPUID_X2APIC_VIRT;
-}
-
 static void vmx_enable_msr_interception(struct domain *d, uint32_t msr)
 {
     struct vcpu *v;
@@ -2187,7 +2169,6 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .sync_pir_to_irr      = vmx_sync_pir_to_irr,
     .handle_eoi           = vmx_handle_eoi,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
-    .hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf,
     .enable_msr_interception = vmx_enable_msr_interception,
     .is_singlestep_supported = vmx_is_singlestep_supported,
     .set_mode = vmx_set_mode,
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 9be9fe3..59fe98d 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -869,7 +869,8 @@ int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val)
 int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
 {
-    struct domain *currd = current->domain;
+    struct vcpu *curr = current;
+    struct domain *currd = curr->domain;
     /* Optionally shift out of the way of Viridian architectural leaves. */
     uint32_t base = is_viridian_domain(currd) ? 0x40000100 : 0x40000000;
     uint32_t limit, dummy;
@@ -961,13 +962,38 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
         }
         break;
 
-    case 4:
-        if ( !has_hvm_container_domain(currd) )
+    case 4: /* HVM hypervisor leaf. */
+        if ( !has_hvm_container_domain(currd) || sub_idx != 0 )
         {
             *eax = *ebx = *ecx = *edx = 0;
             break;
         }
-        hvm_hypervisor_cpuid_leaf(sub_idx, eax, ebx, ecx, edx);
+
+        if ( cpu_has_vmx_apic_reg_virt )
+            *eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
+
+        /*
+         * We want to claim that x2APIC is virtualized if APIC MSR accesses
+         * are not intercepted. When all three of these are true both rdmsr
+         * and wrmsr in the guest will run without VMEXITs (see
+         * vmx_vlapic_msr_changed()).
+         */
+        if ( cpu_has_vmx_virtualize_x2apic_mode &&
+             cpu_has_vmx_apic_reg_virt &&
+             cpu_has_vmx_virtual_intr_delivery )
+            *eax |= XEN_HVM_CPUID_X2APIC_VIRT;
+
+        /*
+         * Indicate that memory mapped from other domains (either grants or
+         * foreign pages) has valid IOMMU entries.
+         */
+        *eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
+
+        /* Indicate presence of vcpu id and set it in ebx */
+        *eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
+        *ebx = curr->vcpu_id;
+
+        *ecx = *edx = 0;
         break;
 
     default:
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 8e366c0..1b9bbaa 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -207,10 +207,6 @@ struct hvm_function_table {
                                 uint8_t *p2m_acc, bool_t access_r,
                                 bool_t access_w, bool_t access_x);
 
-    void (*hypervisor_cpuid_leaf)(uint32_t sub_idx,
-                                  uint32_t *eax, uint32_t *ebx,
-                                  uint32_t *ecx, uint32_t *edx);
-
     void (*enable_msr_interception)(struct domain *d, uint32_t msr);
     bool_t (*is_singlestep_supported)(void);
     int (*set_mode)(struct vcpu *v, int mode);
@@ -406,9 +402,6 @@ bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);
 #define has_viridian_apic_assist(d) \
     (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist))
 
-void hvm_hypervisor_cpuid_leaf(uint32_t sub_idx,
-                               uint32_t *eax, uint32_t *ebx,
-                               uint32_t *ecx, uint32_t *edx);
 void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                                    unsigned int *ecx, unsigned int *edx);
 bool hvm_check_cpuid_faulting(struct vcpu *v);
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* Re: [PATCH 1/6] xen/x86: Add a helper to calculate family/model/stepping information
  2016-11-16 12:31 ` [PATCH 1/6] xen/x86: Add a helper to calculate family/model/stepping information Andrew Cooper
@ 2016-11-16 12:49   ` Jan Beulich
  2016-11-16 12:50     ` Andrew Cooper
  0 siblings, 1 reply; 32+ messages in thread
From: Jan Beulich @ 2016-11-16 12:49 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Xen-devel

>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
> --- a/xen/arch/x86/cpu/common.c
> +++ b/xen/arch/x86/cpu/common.c
> @@ -183,6 +183,25 @@ int get_cpu_vendor(const char v[], enum get_cpu_vendor mode)
>  	return X86_VENDOR_UNKNOWN;
>  }
>  
> +u8 get_cpu_family(uint32_t raw, u8 *model, u8 *stepping)
> +{
> +	u8 fam, mod;
> +
> +	fam = (raw >> 8) & 0xf;
> +	if (fam == 0xf)
> +		fam += (raw >> 20) & 0xff;
> +
> +	mod = (raw >> 4) & 0xf;
> +	if (fam >= 0x6)
> +		mod |= (raw >> 12) & 0xf0;
> +
> +	if ( model )
> +		*model = mod;
> +	if ( stepping )
> +		*stepping = raw & 0xf;

With these converted to Linux style
Reviewed-by: Jan Beulich <jbeulich@suse.com>

But then I think it would be nice if we could slowly get rid of u8 and
alike, in favor of the standard uint8_t and friends, so I would
appreciate if you could avoid introducing new instances.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 1/6] xen/x86: Add a helper to calculate family/model/stepping information
  2016-11-16 12:49   ` Jan Beulich
@ 2016-11-16 12:50     ` Andrew Cooper
  0 siblings, 0 replies; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 12:50 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Xen-devel

On 16/11/16 12:49, Jan Beulich wrote:
>>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
>> --- a/xen/arch/x86/cpu/common.c
>> +++ b/xen/arch/x86/cpu/common.c
>> @@ -183,6 +183,25 @@ int get_cpu_vendor(const char v[], enum get_cpu_vendor mode)
>>  	return X86_VENDOR_UNKNOWN;
>>  }
>>  
>> +u8 get_cpu_family(uint32_t raw, u8 *model, u8 *stepping)
>> +{
>> +	u8 fam, mod;
>> +
>> +	fam = (raw >> 8) & 0xf;
>> +	if (fam == 0xf)
>> +		fam += (raw >> 20) & 0xff;
>> +
>> +	mod = (raw >> 4) & 0xf;
>> +	if (fam >= 0x6)
>> +		mod |= (raw >> 12) & 0xf0;
>> +
>> +	if ( model )
>> +		*model = mod;
>> +	if ( stepping )
>> +		*stepping = raw & 0xf;
> With these converted to Linux style
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
>
> But then I think it would be nice if we could slowly get rid of u8 and
> alike, in favor of the standard uint8_t and friends, so I would
> appreciate if you could avoid introducing new instances.

Ok - will do.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 2/6] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid()
  2016-11-16 12:31 ` [PATCH 2/6] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid() Andrew Cooper
@ 2016-11-16 12:53   ` Jan Beulich
  2016-11-16 13:01     ` Andrew Cooper
  0 siblings, 1 reply; 32+ messages in thread
From: Jan Beulich @ 2016-11-16 12:53 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Kevin Tian, Boris Ostrovsky, Jun Nakajima, Xen-devel

>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
> This reduces the net complexity of CPUID handling by having all adjustments in
> at the same place.  Remove the now-unused vpmu_do_cpuid() infrastructure.

I have to admit that I'm not convinced this is a good idea at this point,
due to the added redundancy. Iirc your plan is to combine hvm_cpuid()
and pv_cpuid() anyway, at which point the folding done here would be
quite a bit more natural.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 2/6] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid()
  2016-11-16 12:53   ` Jan Beulich
@ 2016-11-16 13:01     ` Andrew Cooper
  2016-11-16 13:34       ` Jan Beulich
  0 siblings, 1 reply; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 13:01 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Kevin Tian, Boris Ostrovsky, Jun Nakajima, Xen-devel

On 16/11/16 12:53, Jan Beulich wrote:
>>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
>> This reduces the net complexity of CPUID handling by having all adjustments in
>> at the same place.  Remove the now-unused vpmu_do_cpuid() infrastructure.
> I have to admit that I'm not convinced this is a good idea at this point,
> due to the added redundancy. Iirc your plan is to combine hvm_cpuid()
> and pv_cpuid() anyway, at which point the folding done here would be
> quite a bit more natural.

Indeed, to guest_cpuid().

It is far easier to reason about the safety of both changes by first
untangling the calltree, then merging the functions.  I tried it the
other way first, but that is a far more complicated change.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops
  2016-11-16 12:31 ` [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops Andrew Cooper
@ 2016-11-16 13:09   ` Jan Beulich
  2016-11-16 16:27   ` Boris Ostrovsky
  2016-11-17  5:29   ` Tian, Kevin
  2 siblings, 0 replies; 32+ messages in thread
From: Jan Beulich @ 2016-11-16 13:09 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Kevin Tian, Boris Ostrovsky, Jun Nakajima, Xen-devel

>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
> core2_no_vpmu_ops exists solely to work around the default-leaking of CPUID/MSR
> values in Xen.
> 
> With CPUID handling removed from arch_vpmu_ops, the RDMSR handling is the last
> remaining hook.  Since core2_no_vpmu_ops's introduction in c/s 25250ed7 "vpmu
> intel: Add cpuid handling when vpmu disabled", a lot of work has been done and
> the nop path in vpmu_do_msr() now suffices.
> 
> vpmu_do_msr() also falls into the nop path for un-configured or unprivileged
> domains, which enables the removal the duplicate logic in priv_op_read_msr().
> 
> Finally, make all arch_vpmu_ops structures const as they are never modified,
> and make them static as they are not referred to externally.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Fundamentally the patch is fine and could have my ack, but I expect
it to change should patch 2 be deferred. Furthermore ...

> --- a/xen/arch/x86/cpu/vpmu_amd.c
> +++ b/xen/arch/x86/cpu/vpmu_amd.c
> @@ -488,7 +488,7 @@ static void amd_vpmu_dump(const struct vcpu *v)
>      }
>  }
>  
> -struct arch_vpmu_ops amd_vpmu_ops = {
> +const static struct arch_vpmu_ops amd_vpmu_ops = {

... the conventional ordering is storage class first; I'd find it especially
confusing if someone didn't put "typedef" first, and "typedef" too is a
storage class specifier.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 2/6] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid()
  2016-11-16 13:01     ` Andrew Cooper
@ 2016-11-16 13:34       ` Jan Beulich
  2016-11-17  5:21         ` Tian, Kevin
  0 siblings, 1 reply; 32+ messages in thread
From: Jan Beulich @ 2016-11-16 13:34 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Kevin Tian, Boris Ostrovsky, Jun Nakajima, Xen-devel

>>> On 16.11.16 at 14:01, <andrew.cooper3@citrix.com> wrote:
> On 16/11/16 12:53, Jan Beulich wrote:
>>>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
>>> This reduces the net complexity of CPUID handling by having all adjustments in
>>> at the same place.  Remove the now-unused vpmu_do_cpuid() infrastructure.
>> I have to admit that I'm not convinced this is a good idea at this point,
>> due to the added redundancy. Iirc your plan is to combine hvm_cpuid()
>> and pv_cpuid() anyway, at which point the folding done here would be
>> quite a bit more natural.
> 
> Indeed, to guest_cpuid().
> 
> It is far easier to reason about the safety of both changes by first
> untangling the calltree, then merging the functions.  I tried it the
> other way first, but that is far more complicated change.

Well, let's see what others think.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
  2016-11-16 12:31 ` [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid() Andrew Cooper
@ 2016-11-16 15:20   ` Jan Beulich
  2016-11-16 17:07     ` Andrew Cooper
  2016-11-16 16:40   ` Boris Ostrovsky
  1 sibling, 1 reply; 32+ messages in thread
From: Jan Beulich @ 2016-11-16 15:20 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: Boris Ostrovsky, Kevin Tian, Jun Nakajima, Suravee Suthikulpanit,
	Xen-devel

>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
> @@ -3676,6 +3671,12 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>              if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
>                  *edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
>          }
> +
> +        /* SYSCALL is hidden outside of long mode on Intel. */
> +        if ( d->arch.x86_vendor == X86_VENDOR_INTEL &&
> +             !hvm_long_mode_enabled(v))
> +            *edx &= ~cpufeat_mask(X86_FEATURE_SYSCALL);

Original code also set the bit in its opposite path - don't you think
this should be retained (or otherwise the change be explained in
the description)?

> @@ -3700,6 +3701,14 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>  
>          *ebx &= hvm_featureset[FEATURESET_e8b];
>          break;
> +
> +    case 0x8000001c:
> +        if ( !(v->arch.xcr0 & XSTATE_LWP) )
> +            *eax = 0;
> +        else if ( cpu_has_svm && cpu_has_lwp )
> +            /* Turn on available bit and other features specified in lwp_cfg. */
> +            *eax = (*edx & v->arch.hvm_svm.guest_lwp_cfg) | 1;
> +        break;

Wouldn't it be better to do

+        if ( cpu_has_svm && cpu_has_lwp && (v->arch.xcr0 & XSTATE_LWP) )
+            /* Turn on available bit and other features specified in lwp_cfg. */
+            *eax = (*edx & v->arch.hvm_svm.guest_lwp_cfg) | 1;
+        else
+            *eax = 0;

the more that you're (slightly) altering original behavior anyway?

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 5/6] x86/time: Move cpuid_time_leaf() handling into cpuid_hypervisor_leaves()
  2016-11-16 12:31 ` [PATCH 5/6] x86/time: Move cpuid_time_leaf() handling into cpuid_hypervisor_leaves() Andrew Cooper
@ 2016-11-16 15:47   ` Jan Beulich
  0 siblings, 0 replies; 32+ messages in thread
From: Jan Beulich @ 2016-11-16 15:47 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Xen-devel

>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
> This reduces the net complexity of CPUID handling by having all adjustments in
> at the same place.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 6/6] x86/hvm: Move hvm_hypervisor_cpuid_leaf() handling into cpuid_hypervisor_leaves()
  2016-11-16 12:31 ` [PATCH 6/6] x86/hvm: Move hvm_hypervisor_cpuid_leaf() " Andrew Cooper
@ 2016-11-16 15:53   ` Jan Beulich
  2016-11-16 17:11     ` Andrew Cooper
  0 siblings, 1 reply; 32+ messages in thread
From: Jan Beulich @ 2016-11-16 15:53 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Kevin Tian, Jun Nakajima, Xen-devel

>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
> @@ -961,13 +962,38 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
>          }
>          break;
>  
> -    case 4:
> -        if ( !has_hvm_container_domain(currd) )
> +    case 4: /* HVM hypervisor leaf. */
> +        if ( !has_hvm_container_domain(currd) || sub_idx != 0 )
>          {
>              *eax = *ebx = *ecx = *edx = 0;

I think you want to pull this ahead of the if(), to match previous
behavior (discarding any guest config overrides). Otherwise ...

>              break;
>          }
> -        hvm_hypervisor_cpuid_leaf(sub_idx, eax, ebx, ecx, edx);
> +
> +        if ( cpu_has_vmx_apic_reg_virt )
> +            *eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;

... you may end up not or-ing into a zero initial value here.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops
  2016-11-16 12:31 ` [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops Andrew Cooper
  2016-11-16 13:09   ` Jan Beulich
@ 2016-11-16 16:27   ` Boris Ostrovsky
  2016-11-16 17:04     ` Andrew Cooper
  2016-11-17  5:29   ` Tian, Kevin
  2 siblings, 1 reply; 32+ messages in thread
From: Boris Ostrovsky @ 2016-11-16 16:27 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel; +Cc: Kevin Tian, Jun Nakajima, Jan Beulich

On 11/16/2016 07:31 AM, Andrew Cooper wrote:
> diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
> index a542f4d..1f822ca 100644
> --- a/xen/arch/x86/cpu/vpmu.c
> +++ b/xen/arch/x86/cpu/vpmu.c
> @@ -136,9 +136,10 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
>      const struct arch_vpmu_ops *ops;
>      int ret = 0;
>  
> +    /* Don't leak PMU MSRs to unprivileged domains. */

This was a somewhat incorrect comment originally and since you are
moving it then perhaps it's worth adding something along the lines of
"if VPMU is off or if the privileged domain is profiling whole system".
Otherwise it gives impression that unprivileged domains never access
those MSRs.

-boris

>      if ( likely(vpmu_mode == XENPMU_MODE_OFF) ||
>           ((vpmu_mode & XENPMU_MODE_ALL) &&
> -          !is_hardware_domain(current->domain)) )
> +          !is_hardware_domain(curr->domain)) )
>           goto nop;
>  
>      vpmu = vcpu_vpmu(curr);



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
  2016-11-16 12:31 ` [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid() Andrew Cooper
  2016-11-16 15:20   ` Jan Beulich
@ 2016-11-16 16:40   ` Boris Ostrovsky
  2016-11-16 17:10     ` Andrew Cooper
  1 sibling, 1 reply; 32+ messages in thread
From: Boris Ostrovsky @ 2016-11-16 16:40 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel
  Cc: Suravee Suthikulpanit, Kevin Tian, Jun Nakajima, Jan Beulich

On 11/16/2016 07:31 AM, Andrew Cooper wrote:
> @@ -3700,6 +3701,14 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>  
>          *ebx &= hvm_featureset[FEATURESET_e8b];
>          break;
> +
> +    case 0x8000001c:
> +        if ( !(v->arch.xcr0 & XSTATE_LWP) )
> +            *eax = 0;
> +        else if ( cpu_has_svm && cpu_has_lwp )
> +            /* Turn on available bit and other features specified in lwp_cfg. */
> +            *eax = (*edx & v->arch.hvm_svm.guest_lwp_cfg) | 1;
> +        break;
>      }

You don't think this whole case should be under cpu_has_svm (or
X86_VENDOR_AMD)?


-boris



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops
  2016-11-16 16:27   ` Boris Ostrovsky
@ 2016-11-16 17:04     ` Andrew Cooper
  2016-11-16 17:09       ` Boris Ostrovsky
  0 siblings, 1 reply; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 17:04 UTC (permalink / raw)
  To: Boris Ostrovsky, Xen-devel; +Cc: Kevin Tian, Jun Nakajima, Jan Beulich

On 16/11/16 16:27, Boris Ostrovsky wrote:
> On 11/16/2016 07:31 AM, Andrew Cooper wrote:
>> diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
>> index a542f4d..1f822ca 100644
>> --- a/xen/arch/x86/cpu/vpmu.c
>> +++ b/xen/arch/x86/cpu/vpmu.c
>> @@ -136,9 +136,10 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
>>      const struct arch_vpmu_ops *ops;
>>      int ret = 0;
>>  
>> +    /* Don't leak PMU MSRs to unprivileged domains. */
> This was a somewhat incorrect comment originally and since you are
> moving it then perhaps it's worth adding something along the lines of
> "if VPMU is off or if the privileged domain is profiling whole system".
> Otherwise it gives impression that unprivileged domains never access
> those MSRs.

/*
 * Hide the PMU MSRs if vpmu is not configured, or the hardware domain
 * is profiling the whole system.
 */

?

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
  2016-11-16 15:20   ` Jan Beulich
@ 2016-11-16 17:07     ` Andrew Cooper
  2016-11-17 10:54       ` Jan Beulich
  0 siblings, 1 reply; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 17:07 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Boris Ostrovsky, Kevin Tian, Jun Nakajima, Suravee Suthikulpanit,
	Xen-devel

On 16/11/16 15:20, Jan Beulich wrote:
>>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
>> @@ -3676,6 +3671,12 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>>              if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
>>                  *edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
>>          }
>> +
>> +        /* SYSCALL is hidden outside of long mode on Intel. */
>> +        if ( d->arch.x86_vendor == X86_VENDOR_INTEL &&
>> +             !hvm_long_mode_enabled(v))
>> +            *edx &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
> Original code also set the bit in its opposite path - don't you think
> this should be retained (or otherwise the change be explained in
> the description)?

Yeah - that was conceptually wrong.  It is legitimate (although
unlikely) for an admin to explicitly configure the VM to hide syscall
even in long mode, as they have distinct feature bits.

In reality, however, SYSCALL is already set in edx by this point.  I will
update the commit message.

>
>> @@ -3700,6 +3701,14 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>>  
>>          *ebx &= hvm_featureset[FEATURESET_e8b];
>>          break;
>> +
>> +    case 0x8000001c:
>> +        if ( !(v->arch.xcr0 & XSTATE_LWP) )
>> +            *eax = 0;
>> +        else if ( cpu_has_svm && cpu_has_lwp )
>> +            /* Turn on available bit and other features specified in lwp_cfg. */
>> +            *eax = (*edx & v->arch.hvm_svm.guest_lwp_cfg) | 1;
>> +        break;
> Wouldn't it be better to do
>
> +        if ( cpu_has_svm && cpu_has_lwp && (v->arch.xcr0 & XSTATE_LWP) )
> +            /* Turn on available bit and other features specified in lwp_cfg. */
> +            *eax = (*edx & v->arch.hvm_svm.guest_lwp_cfg) | 1;
> +        else
> +            *eax = 0;
>
> the more that you're (slightly) altering original behavior anyway?

That would be slightly better.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops
  2016-11-16 17:09       ` Boris Ostrovsky
@ 2016-11-16 17:08         ` Andrew Cooper
  0 siblings, 0 replies; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 17:08 UTC (permalink / raw)
  To: Boris Ostrovsky, Xen-devel; +Cc: Kevin Tian, Jun Nakajima, Jan Beulich

On 16/11/16 17:09, Boris Ostrovsky wrote:
> On 11/16/2016 12:04 PM, Andrew Cooper wrote:
>> On 16/11/16 16:27, Boris Ostrovsky wrote:
>>> On 11/16/2016 07:31 AM, Andrew Cooper wrote:
>>>> diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
>>>> index a542f4d..1f822ca 100644
>>>> --- a/xen/arch/x86/cpu/vpmu.c
>>>> +++ b/xen/arch/x86/cpu/vpmu.c
>>>> @@ -136,9 +136,10 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
>>>>      const struct arch_vpmu_ops *ops;
>>>>      int ret = 0;
>>>>  
>>>> +    /* Don't leak PMU MSRs to unprivileged domains. */
>>> This was a somewhat incorrect comment originally and since you are
>>> moving it then perhaps it's worth adding something along the lines of
>>> "if VPMU is off or if the privileged domain is profiling whole system".
>>> Otherwise it gives impression that unprivileged domains never access
>>> those MSRs.
>> /*
>>  * Hide the PMU MSRs if vpmu is not configured, or the hardware domain
>>  * is profiling the whole system.
>>  */
>>
>> ?
>
> Sure, thanks.
>
> (or, in fact, you can just drop the whole comment since I think it's
> pretty obvious what's going on).

I'd prefer to keep it.  I bet it won't be so obvious in 6 months time.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops
  2016-11-16 17:04     ` Andrew Cooper
@ 2016-11-16 17:09       ` Boris Ostrovsky
  2016-11-16 17:08         ` Andrew Cooper
  0 siblings, 1 reply; 32+ messages in thread
From: Boris Ostrovsky @ 2016-11-16 17:09 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel; +Cc: Kevin Tian, Jun Nakajima, Jan Beulich

On 11/16/2016 12:04 PM, Andrew Cooper wrote:
> On 16/11/16 16:27, Boris Ostrovsky wrote:
>> On 11/16/2016 07:31 AM, Andrew Cooper wrote:
>>> diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
>>> index a542f4d..1f822ca 100644
>>> --- a/xen/arch/x86/cpu/vpmu.c
>>> +++ b/xen/arch/x86/cpu/vpmu.c
>>> @@ -136,9 +136,10 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
>>>      const struct arch_vpmu_ops *ops;
>>>      int ret = 0;
>>>  
>>> +    /* Don't leak PMU MSRs to unprivileged domains. */
>> This was a somewhat incorrect comment originally and since you are
>> moving it then perhaps it's worth adding something along the lines of
>> "if VPMU is off or if the privileged domain is profiling whole system".
>> Otherwise it gives impression that unprivileged domains never access
>> those MSRs.
> /*
>  * Hide the PMU MSRs if vpmu is not configured, or the hardware domain
>  * is profiling the whole system.
>  */
>
> ?


Sure, thanks.

(or, in fact, you can just drop the whole comment since I think it's
pretty obvious what's going on).

-boris

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
  2016-11-16 16:40   ` Boris Ostrovsky
@ 2016-11-16 17:10     ` Andrew Cooper
  2016-11-16 17:34       ` Boris Ostrovsky
  0 siblings, 1 reply; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 17:10 UTC (permalink / raw)
  To: Boris Ostrovsky, Xen-devel
  Cc: Suravee Suthikulpanit, Kevin Tian, Jun Nakajima, Jan Beulich

On 16/11/16 16:40, Boris Ostrovsky wrote:
> On 11/16/2016 07:31 AM, Andrew Cooper wrote:
>> @@ -3700,6 +3701,14 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>>  
>>          *ebx &= hvm_featureset[FEATURESET_e8b];
>>          break;
>> +
>> +    case 0x8000001c:
>> +        if ( !(v->arch.xcr0 & XSTATE_LWP) )
>> +            *eax = 0;
>> +        else if ( cpu_has_svm && cpu_has_lwp )
>> +            /* Turn on available bit and other features specified in lwp_cfg. */
>> +            *eax = (*edx & v->arch.hvm_svm.guest_lwp_cfg) | 1;
>> +        break;
>>      }
> You don't think this whole case should be under cpu_has_svm (or
> X86_VENDOR_AMD)?

LWP, being independently identifiable state should be gated on that
alone, even if in reality, it only exists on AMD hardware.

The use of cpu_has_svm is only because guest_lwp_cfg is in an svm
union.  Were guest_lwp_cfg to move, the condition should be relaxed.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 6/6] x86/hvm: Move hvm_hypervisor_cpuid_leaf() handling into cpuid_hypervisor_leaves()
  2016-11-16 15:53   ` Jan Beulich
@ 2016-11-16 17:11     ` Andrew Cooper
  0 siblings, 0 replies; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 17:11 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Kevin Tian, Jun Nakajima, Xen-devel

On 16/11/16 15:53, Jan Beulich wrote:
>>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
>> @@ -961,13 +962,38 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
>>          }
>>          break;
>>  
>> -    case 4:
>> -        if ( !has_hvm_container_domain(currd) )
>> +    case 4: /* HVM hypervisor leaf. */
>> +        if ( !has_hvm_container_domain(currd) || sub_idx != 0 )
>>          {
>>              *eax = *ebx = *ecx = *edx = 0;
> I think you want to pull this ahead of the if(), to match previous
> behavior (discarding any guest config overrides). Otherwise ...
>
>>              break;
>>          }
>> -        hvm_hypervisor_cpuid_leaf(sub_idx, eax, ebx, ecx, edx);
>> +
>> +        if ( cpu_has_vmx_apic_reg_virt )
>> +            *eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
> ... you may end up not or-ing into a zero initial value here.

Ah yes.  I will fix that.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
  2016-11-16 17:10     ` Andrew Cooper
@ 2016-11-16 17:34       ` Boris Ostrovsky
  2016-11-16 17:34         ` Andrew Cooper
  0 siblings, 1 reply; 32+ messages in thread
From: Boris Ostrovsky @ 2016-11-16 17:34 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel
  Cc: Kevin Tian, Jan Beulich, Jun Nakajima, Suravee Suthikulpanit

On 11/16/2016 12:10 PM, Andrew Cooper wrote:
> On 16/11/16 16:40, Boris Ostrovsky wrote:
>> On 11/16/2016 07:31 AM, Andrew Cooper wrote:
>>> @@ -3700,6 +3701,14 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>>>  
>>>          *ebx &= hvm_featureset[FEATURESET_e8b];
>>>          break;
>>> +
>>> +    case 0x8000001c:
>>> +        if ( !(v->arch.xcr0 & XSTATE_LWP) )
>>> +            *eax = 0;
>>> +        else if ( cpu_has_svm && cpu_has_lwp )
>>> +            /* Turn on available bit and other features specified in lwp_cfg. */
>>> +            *eax = (*edx & v->arch.hvm_svm.guest_lwp_cfg) | 1;
>>> +        break;
>>>      }
>> You don't think this whole case should be under cpu_has_svm (or
>> X86_VENDOR_AMD)?
> LWP, being independently identifiable state should be gated on that
> alone, even if in reality, it only exists on AMD hardware.
>
> The use of cpu_has_svm is only because guest_lwp_cfg is in an svm
> union.  Were guest_lwp_cfg to move, the condition should be relaxed.

I was thinking about the first 'if' clause. I believe 0x8000001c doesn't
exist on Intel yet but if they add it we will clear eax for no good reason.

OTOH we wouldn't be handling the leaf correctly anyway so maybe it's OK.


-boris


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
  2016-11-16 17:34       ` Boris Ostrovsky
@ 2016-11-16 17:34         ` Andrew Cooper
  2016-11-16 17:45           ` Boris Ostrovsky
  0 siblings, 1 reply; 32+ messages in thread
From: Andrew Cooper @ 2016-11-16 17:34 UTC (permalink / raw)
  To: Boris Ostrovsky, Xen-devel
  Cc: Kevin Tian, Jan Beulich, Jun Nakajima, Suravee Suthikulpanit

On 16/11/16 17:34, Boris Ostrovsky wrote:
> On 11/16/2016 12:10 PM, Andrew Cooper wrote:
>> On 16/11/16 16:40, Boris Ostrovsky wrote:
>>> On 11/16/2016 07:31 AM, Andrew Cooper wrote:
>>>> @@ -3700,6 +3701,14 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>>>>  
>>>>          *ebx &= hvm_featureset[FEATURESET_e8b];
>>>>          break;
>>>> +
>>>> +    case 0x8000001c:
>>>> +        if ( !(v->arch.xcr0 & XSTATE_LWP) )
>>>> +            *eax = 0;
>>>> +        else if ( cpu_has_svm && cpu_has_lwp )
>>>> +            /* Turn on available bit and other features specified in lwp_cfg. */
>>>> +            *eax = (*edx & v->arch.hvm_svm.guest_lwp_cfg) | 1;
>>>> +        break;
>>>>      }
>>> You don't think this whole case should be under cpu_has_svm (or
>>> X86_VENDOR_AMD)?
>> LWP, being independently identifiable state should be gated on that
>> alone, even if in reality, it only exists on AMD hardware.
>>
>> The use of cpu_has_svm is only because guest_lwp_cfg is in an svm
>> union.  Were guest_lwp_cfg to move, the condition should be relaxed.
> I was thinking about the first 'if' clause. I believe 0x8000001c doesn't
> exist on Intel yet but if they add it we will clear eax for no good reason.
>
> OTOH we wouldn't be handling the leaf correctly anyway so maybe it's OK.

What do you think about Jan's suggestion, which is slightly better
overall anyway?

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
  2016-11-16 17:34         ` Andrew Cooper
@ 2016-11-16 17:45           ` Boris Ostrovsky
  2016-11-17  5:41             ` Tian, Kevin
  0 siblings, 1 reply; 32+ messages in thread
From: Boris Ostrovsky @ 2016-11-16 17:45 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel
  Cc: Kevin Tian, Jan Beulich, Jun Nakajima, Suravee Suthikulpanit

On 11/16/2016 12:34 PM, Andrew Cooper wrote:
> On 16/11/16 17:34, Boris Ostrovsky wrote:
>> On 11/16/2016 12:10 PM, Andrew Cooper wrote:
>>> On 16/11/16 16:40, Boris Ostrovsky wrote:
>>>> On 11/16/2016 07:31 AM, Andrew Cooper wrote:
>>>>> @@ -3700,6 +3701,14 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>>>>>  
>>>>>          *ebx &= hvm_featureset[FEATURESET_e8b];
>>>>>          break;
>>>>> +
>>>>> +    case 0x8000001c:
>>>>> +        if ( !(v->arch.xcr0 & XSTATE_LWP) )
>>>>> +            *eax = 0;
>>>>> +        else if ( cpu_has_svm && cpu_has_lwp )
>>>>> +            /* Turn on available bit and other features specified in lwp_cfg. */
>>>>> +            *eax = (*edx & v->arch.hvm_svm.guest_lwp_cfg) | 1;
>>>>> +        break;
>>>>>      }
>>>> You don't think this whole case should be under cpu_has_svm (or
>>>> X86_VENDOR_AMD)?
>>> LWP, being independently identifiable state should be gated on that
>>> alone, even if in reality, it only exists on AMD hardware.
>>>
>>> The use of cpu_has_svm is only because guest_lwp_cfg is in an svm
>>> union.  Were guest_lwp_cfg to move, the condition should be relaxed.
>> I was thinking about the first 'if' clause. I believe 0x8000001c doesn't
>> exist on Intel yet but if they add it we will clear eax for no good reason.
>>
>> OTOH we wouldn't be handling the leaf correctly anyway so maybe it's OK.
> What do you think about Jan's suggestion, which is slightly better
> overall anyway?

I don't think it changes anything wrt clearing eax on Intel.

-boris

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 2/6] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid()
  2016-11-16 13:34       ` Jan Beulich
@ 2016-11-17  5:21         ` Tian, Kevin
  2016-11-17 10:52           ` Jan Beulich
  0 siblings, 1 reply; 32+ messages in thread
From: Tian, Kevin @ 2016-11-17  5:21 UTC (permalink / raw)
  To: Jan Beulich, Andrew Cooper; +Cc: Boris Ostrovsky, Nakajima, Jun, Xen-devel

> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Wednesday, November 16, 2016 9:34 PM
> 
> >>> On 16.11.16 at 14:01, <andrew.cooper3@citrix.com> wrote:
> > On 16/11/16 12:53, Jan Beulich wrote:
> >>>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
> >>> This reduces the net complexity of CPUID handling by having all adjustments
> >>> in the same place.  Remove the now-unused vpmu_do_cpuid() infrastructure.
> >> I have to admit that I'm not convinced this is a good idea at this point,
> >> due to the added redundancy. Iirc your plan is to combine hvm_cpuid()
> >> and pv_cpuid() anyway, at which point the folding done here would be
> >> quite a bit more natural.
> >
> > Indeed, to guest_cpuid().
> >
> > It is far easier to reason about the safety of both changes by first
> > untangling the calltree, then merging the functions.  I tried it the
> > other way first, but that is far more complicated change.
> 
> Well, let's see what others think.
> 

I'm fine with this change, as long as it paves a clear incremental way 
towards final guest_cpuid.

Acked-by: Kevin Tian <kevin.tian@intel.com>

Thanks
Kevin

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops
  2016-11-16 12:31 ` [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops Andrew Cooper
  2016-11-16 13:09   ` Jan Beulich
  2016-11-16 16:27   ` Boris Ostrovsky
@ 2016-11-17  5:29   ` Tian, Kevin
  2 siblings, 0 replies; 32+ messages in thread
From: Tian, Kevin @ 2016-11-17  5:29 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel; +Cc: Boris Ostrovsky, Nakajima, Jun, Jan Beulich

> From: Andrew Cooper [mailto:andrew.cooper3@citrix.com]
> Sent: Wednesday, November 16, 2016 8:32 PM
> 
> core2_no_vpmu_ops exists solely to work around the default-leaking of CPUID/MSR
> values in Xen.
> 
> With CPUID handling removed from arch_vpmu_ops, the RDMSR handling is the last
> remaining hook.  Since core2_no_vpmu_ops's introduction in c/s 25250ed7 "vpmu
> intel: Add cpuid handling when vpmu disabled", a lot of work has been done and
> the nop path in vpmu_do_msr() now suffices.
> 
> vpmu_do_msr() also falls into the nop path for un-configured or unprivileged
> domains, which enables the removal of the duplicate logic in priv_op_read_msr().
> 
> Finally, make all arch_vpmu_ops structures const as they are never modified,
> and make them static as they are not referred to externally.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Acked-by: Kevin Tian <kevin.tian@intel.com>, assuming Boris' comment fixed.

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
  2016-11-16 17:45           ` Boris Ostrovsky
@ 2016-11-17  5:41             ` Tian, Kevin
  0 siblings, 0 replies; 32+ messages in thread
From: Tian, Kevin @ 2016-11-17  5:41 UTC (permalink / raw)
  To: Boris Ostrovsky, Andrew Cooper, Xen-devel
  Cc: Jan Beulich, Nakajima, Jun, Suravee Suthikulpanit

> From: Boris Ostrovsky [mailto:boris.ostrovsky@oracle.com]
> Sent: Thursday, November 17, 2016 1:45 AM
> 
> On 11/16/2016 12:34 PM, Andrew Cooper wrote:
> > On 16/11/16 17:34, Boris Ostrovsky wrote:
> >> On 11/16/2016 12:10 PM, Andrew Cooper wrote:
> >>> On 16/11/16 16:40, Boris Ostrovsky wrote:
> >>>> On 11/16/2016 07:31 AM, Andrew Cooper wrote:
> >>>>> @@ -3700,6 +3701,14 @@ void hvm_cpuid(unsigned int input, unsigned int *eax,
> unsigned int *ebx,
> >>>>>
> >>>>>          *ebx &= hvm_featureset[FEATURESET_e8b];
> >>>>>          break;
> >>>>> +
> >>>>> +    case 0x8000001c:
> >>>>> +        if ( !(v->arch.xcr0 & XSTATE_LWP) )
> >>>>> +            *eax = 0;
> >>>>> +        else if ( cpu_has_svm && cpu_has_lwp )
> >>>>> +            /* Turn on available bit and other features specified in lwp_cfg. */
> >>>>> +            *eax = (*edx & v->arch.hvm_svm.guest_lwp_cfg) | 1;
> >>>>> +        break;
> >>>>>      }
> >>>> You don't think this whole case should be under cpu_has_svm (or
> >>>> X86_VENDOR_AMD)?
> >>> LWP, being independently identifiable state should be gated on that
> >>> alone, even if in reality, it only exists on AMD hardware.
> >>>
> >>> The use of cpu_has_svm is only because guest_lwp_cfg is in an svm
> >>> union.  Were guest_lwp_cfg to move, the condition should be relaxed.
> >> I was thinking about the first 'if' clause. I believe 0x8000001c doesn't
> >> exist on Intel yet but if they add it we will clear eax for no good reason.
> >>
> >> OTOH we wouldn't be handling the leaf correctly anyway so maybe it's OK.
> > What do you think about Jan's suggestion, which is slightly better
> > overall anyway?
> 
> I don't think it changes anything wrt clearing eax on Intel.
> 

I don't know whether Intel/AMD will coordinate on those leaf code 
definitions (a question I need to ask internally). If there is no guarantee
that this leaf will mean the same feature across vendors, it might be safer
to put the whole hunk under the svm condition?

Thanks
Kevin
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 2/6] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid()
  2016-11-17  5:21         ` Tian, Kevin
@ 2016-11-17 10:52           ` Jan Beulich
  0 siblings, 0 replies; 32+ messages in thread
From: Jan Beulich @ 2016-11-17 10:52 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: Kevin Tian, Boris Ostrovsky, Jun Nakajima, Xen-devel

>>> On 17.11.16 at 06:21, <kevin.tian@intel.com> wrote:
>>  From: Jan Beulich [mailto:JBeulich@suse.com]
>> Sent: Wednesday, November 16, 2016 9:34 PM
>> >>> On 16.11.16 at 14:01, <andrew.cooper3@citrix.com> wrote:
>> > On 16/11/16 12:53, Jan Beulich wrote:
>> >>>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
>> >>> This reduces the net complexity of CPUID handling by having all adjustments
>> >>> in the same place.  Remove the now-unused vpmu_do_cpuid() infrastructure.
>> >> I have to admit that I'm not convinced this is a good idea at this point,
>> >> due to the added redundancy. Iirc your plan is to combine hvm_cpuid()
>> >> and pv_cpuid() anyway, at which point the folding done here would be
>> >> quite a bit more natural.
>> >
>> > Indeed, to guest_cpuid().
>> >
>> > It is far easier to reason about the safety of both changes by first
>> > untangling the calltree, then merging the functions.  I tried it the
>> > other way first, but that is far more complicated change.
>> 
>> Well, let's see what others think.
> 
> I'm fine with this change, as long as it paves a clear incremental way 
> towards final guest_cpuid.
> 
> Acked-by: Kevin Tian <kevin.tian@intel.com>

Okay then -
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid()
  2016-11-16 17:07     ` Andrew Cooper
@ 2016-11-17 10:54       ` Jan Beulich
  0 siblings, 0 replies; 32+ messages in thread
From: Jan Beulich @ 2016-11-17 10:54 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: Boris Ostrovsky, Kevin Tian, Jun Nakajima, Suravee Suthikulpanit,
	Xen-devel

>>> On 16.11.16 at 18:07, <andrew.cooper3@citrix.com> wrote:
> On 16/11/16 15:20, Jan Beulich wrote:
>>>>> On 16.11.16 at 13:31, <andrew.cooper3@citrix.com> wrote:
>>> @@ -3676,6 +3671,12 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>>>              if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
>>>                  *edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
>>>          }
>>> +
>>> +        /* SYSCALL is hidden outside of long mode on Intel. */
>>> +        if ( d->arch.x86_vendor == X86_VENDOR_INTEL &&
>>> +             !hvm_long_mode_enabled(v))
>>> +            *edx &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
>> Original code also set the bit in its opposite path - don't you think
>> this should be retained (or otherwise the change be explained in
>> the description)?
> 
> Yeah - that was conceptually wrong.  It is legitimate (although
> unlikely) for an admin to explicitly configure the VM to hide syscall
> even in long mode, as they have distinct feature bits.

I don't think any 64-bit OS would work without SYSCALL support.
Look at ourselves - we don't even care to check, and we have no
alternative way of invoking hypercalls in case it was absent.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 32+ messages in thread

end of thread, other threads:[~2016-11-17 10:54 UTC | newest]

Thread overview: 32+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-11-16 12:31 [PATCH for-4.9 0/6] Introductory cleanup for CPUID phase 2 work Andrew Cooper
2016-11-16 12:31 ` [PATCH 1/6] xen/x86: Add a helper to calculate family/model/stepping information Andrew Cooper
2016-11-16 12:49   ` Jan Beulich
2016-11-16 12:50     ` Andrew Cooper
2016-11-16 12:31 ` [PATCH 2/6] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid() Andrew Cooper
2016-11-16 12:53   ` Jan Beulich
2016-11-16 13:01     ` Andrew Cooper
2016-11-16 13:34       ` Jan Beulich
2016-11-17  5:21         ` Tian, Kevin
2016-11-17 10:52           ` Jan Beulich
2016-11-16 12:31 ` [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops Andrew Cooper
2016-11-16 13:09   ` Jan Beulich
2016-11-16 16:27   ` Boris Ostrovsky
2016-11-16 17:04     ` Andrew Cooper
2016-11-16 17:09       ` Boris Ostrovsky
2016-11-16 17:08         ` Andrew Cooper
2016-11-17  5:29   ` Tian, Kevin
2016-11-16 12:31 ` [PATCH 4/6] x86/hvm: Move hvm_funcs.cpuid_intercept() handling into hvm_cpuid() Andrew Cooper
2016-11-16 15:20   ` Jan Beulich
2016-11-16 17:07     ` Andrew Cooper
2016-11-17 10:54       ` Jan Beulich
2016-11-16 16:40   ` Boris Ostrovsky
2016-11-16 17:10     ` Andrew Cooper
2016-11-16 17:34       ` Boris Ostrovsky
2016-11-16 17:34         ` Andrew Cooper
2016-11-16 17:45           ` Boris Ostrovsky
2016-11-17  5:41             ` Tian, Kevin
2016-11-16 12:31 ` [PATCH 5/6] x86/time: Move cpuid_time_leaf() handling into cpuid_hypervisor_leaves() Andrew Cooper
2016-11-16 15:47   ` Jan Beulich
2016-11-16 12:31 ` [PATCH 6/6] x86/hvm: Move hvm_hypervisor_cpuid_leaf() " Andrew Cooper
2016-11-16 15:53   ` Jan Beulich
2016-11-16 17:11     ` Andrew Cooper

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).