xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: xen-devel@lists.xen.org
Cc: keir@xen.org, suravee.suthikulpanit@amd.com,
	andrew.cooper3@citrix.com, eddie.dong@intel.com,
	dietmar.hahn@ts.fujitsu.com, JBeulich@suse.com,
	jun.nakajima@intel.com, boris.ostrovsky@oracle.com
Subject: [PATCH v4 13/17] x86/VPMU: Add privileged PMU mode
Date: Tue, 21 Jan 2014 14:08:58 -0500	[thread overview]
Message-ID: <1390331342-3967-14-git-send-email-boris.ostrovsky@oracle.com> (raw)
In-Reply-To: <1390331342-3967-1-git-send-email-boris.ostrovsky@oracle.com>

Add support for privileged PMU mode, which allows the privileged domain (dom0)
to profile both itself (and the hypervisor) and the guests. While this mode is
on, profiling in guests is disabled.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
 xen/arch/x86/hvm/vpmu.c     | 88 ++++++++++++++++++++++++++++++++-------------
 xen/arch/x86/traps.c        |  6 +++-
 xen/include/public/xenpmu.h |  3 ++
 3 files changed, 72 insertions(+), 25 deletions(-)

diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index aead6af..214300d 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -87,6 +87,9 @@ int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(current);
 
+    if ( (vpmu_mode & XENPMU_MODE_PRIV) && !is_control_domain(current->domain) )
+        return 0;
+
     if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr )
     {
         int ret = vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content);
@@ -112,6 +115,9 @@ int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(current);
 
+    if ( (vpmu_mode & XENPMU_MODE_PRIV) && !is_control_domain(current->domain) )
+        return 0;
+
     if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr )
     {
         int ret = vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
@@ -134,14 +140,18 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
     struct vpmu_struct *vpmu;
 
     /* dom0 will handle this interrupt */
-    if ( v->domain->domain_id >= DOMID_FIRST_RESERVED )
+    if ( (vpmu_mode & XENPMU_MODE_PRIV) ||
+         (v->domain->domain_id >= DOMID_FIRST_RESERVED) )
         v = dom0->vcpu[smp_processor_id() % dom0->max_vcpus];
 
     vpmu = vcpu_vpmu(v);
-    if ( !is_hvm_domain(v->domain) )
+    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
+        return 0;
+
+    if ( !is_hvm_domain(v->domain) || (vpmu_mode & XENPMU_MODE_PRIV) )
     {
         /* PV guest or dom0 is doing system profiling */
-        const struct cpu_user_regs *gregs;
+        struct cpu_user_regs *gregs;
         int err;
 
         if ( v->arch.vpmu.xenpmu_data->pmu_flags & PMU_CACHED )
@@ -152,33 +162,62 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
         err = vpmu->arch_vpmu_ops->arch_vpmu_save(v);
         vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
 
-        /* Store appropriate registers in xenpmu_data */
-        if ( is_pv_32bit_domain(current->domain) )
+        if ( !is_hvm_domain(current->domain) )
         {
-            /*
-             * 32-bit dom0 cannot process Xen's addresses (which are 64 bit)
-             * and therefore we treat it the same way as a non-priviledged
-             * PV 32-bit domain.
-             */
-            struct compat_cpu_user_regs *cmp;
-
-            gregs = guest_cpu_user_regs();
-
-            cmp = (struct compat_cpu_user_regs *)
-                    &v->arch.vpmu.xenpmu_data->pmu.r.regs;
-            XLAT_cpu_user_regs(cmp, gregs);
+            uint16_t cs = (current->arch.flags & TF_kernel_mode) ? 0 : 0x3;
+
+            /* Store appropriate registers in xenpmu_data */
+            if ( is_pv_32bit_domain(current->domain) )
+            {
+                gregs = guest_cpu_user_regs();
+
+                if ( (vpmu_mode & XENPMU_MODE_PRIV) &&
+                     !is_pv_32bit_domain(v->domain) )
+                    memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
+                           gregs, sizeof(struct cpu_user_regs));
+                else 
+                {
+                    /*
+                     * 32-bit dom0 cannot process Xen's addresses (which are
+                     * 64 bit) and therefore we treat it the same way as a
+                     * non-privileged PV 32-bit domain.
+                     */
+
+                    struct compat_cpu_user_regs *cmp;
+
+                    cmp = (struct compat_cpu_user_regs *)
+                        &v->arch.vpmu.xenpmu_data->pmu.r.regs;
+                    XLAT_cpu_user_regs(cmp, gregs);
+                }
+            }
+            else if ( !is_control_domain(current->domain) &&
+                      !is_idle_vcpu(current) )
+            {
+                /* PV guest */
+                gregs = guest_cpu_user_regs();
+                memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
+                       gregs, sizeof(struct cpu_user_regs));
+            }
+            else
+                memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
+                       regs, sizeof(struct cpu_user_regs));
+
+            gregs = &v->arch.vpmu.xenpmu_data->pmu.r.regs;
+            gregs->cs = cs;
         }
-        else if ( !is_control_domain(current->domain) &&
-                 !is_idle_vcpu(current) )
+        else
         {
-            /* PV guest */
+            /* HVM guest */
+            struct segment_register cs;
+
             gregs = guest_cpu_user_regs();
             memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
                    gregs, sizeof(struct cpu_user_regs));
+
+            hvm_get_segment_register(current, x86_seg_cs, &cs);
+            gregs = &v->arch.vpmu.xenpmu_data->pmu.r.regs;
+            gregs->cs = cs.attr.fields.dpl;
         }
-        else
-            memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
-                   regs, sizeof(struct cpu_user_regs));
 
         v->arch.vpmu.xenpmu_data->domain_id = current->domain->domain_id;
         v->arch.vpmu.xenpmu_data->vcpu_id = current->vcpu_id;
@@ -444,7 +483,8 @@ long do_xenpmu_op(int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg)
             return -EFAULT;
 
         mode = (uint32_t)pmu_params.d.val & XENPMU_MODE_MASK;
-        if ( mode & ~XENPMU_MODE_ON )
+        if ( (mode & ~(XENPMU_MODE_ON | XENPMU_MODE_PRIV)) ||
+             ((mode & XENPMU_MODE_ON) && (mode & XENPMU_MODE_PRIV)) )
             return -EINVAL;
 
         vpmu_mode &= ~XENPMU_MODE_MASK;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 7ff8401..1854230 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2510,7 +2510,11 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
         case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
         case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
             if ( !vpmu_do_wrmsr(regs->ecx, msr_content) )
-                goto invalid;
+            {
+                if ( (vpmu_mode & XENPMU_MODE_PRIV) &&
+                      is_control_domain(v->domain) )
+                    goto invalid;
+            }
             break;
         default:
             if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
diff --git a/xen/include/public/xenpmu.h b/xen/include/public/xenpmu.h
index df85209..f715f30 100644
--- a/xen/include/public/xenpmu.h
+++ b/xen/include/public/xenpmu.h
@@ -56,11 +56,14 @@ DEFINE_XEN_GUEST_HANDLE(xen_pmu_params_t);
  * - XENPMU_MODE_OFF:   No PMU virtualization
  * - XENPMU_MODE_ON:    Guests can profile themselves, dom0 profiles
  *                      itself and Xen
+ * - XENPMU_MODE_PRIV:  Only dom0 has access to VPMU and it profiles
+ *                      everyone: itself, the hypervisor and the guests.
  */
 #define XENPMU_FEATURE_SHIFT      16
 #define XENPMU_MODE_MASK          ((1U << XENPMU_FEATURE_SHIFT) - 1)
 #define XENPMU_MODE_OFF           0
 #define XENPMU_MODE_ON            (1<<0)
+#define XENPMU_MODE_PRIV          (1<<1)
 
 /*
  * PMU features:
-- 
1.8.1.4

  parent reply	other threads:[~2014-01-21 19:08 UTC|newest]

Thread overview: 48+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-01-21 19:08 [PATCH v4 00/17] x86/PMU: Xen PMU PV support Boris Ostrovsky
2014-01-21 19:08 ` [PATCH v4 01/17] common/symbols: Export hypervisor symbols to privileged guest Boris Ostrovsky
2014-01-24 14:16   ` Jan Beulich
2014-01-21 19:08 ` [PATCH v4 02/17] x86/VPMU: Stop AMD counters when called from vpmu_save_force() Boris Ostrovsky
2014-01-21 19:08 ` [PATCH v4 03/17] x86/VPMU: Minor VPMU cleanup Boris Ostrovsky
2014-01-24 14:28   ` Jan Beulich
2014-01-21 19:08 ` [PATCH v4 04/17] intel/VPMU: Clean up Intel VPMU code Boris Ostrovsky
2014-01-21 19:08 ` [PATCH v4 05/17] x86/VPMU: Handle APIC_LVTPC accesses Boris Ostrovsky
2014-01-21 19:08 ` [PATCH v4 06/17] intel/VPMU: MSR_CORE_PERF_GLOBAL_CTRL should be initialized to zero Boris Ostrovsky
2014-01-21 19:08 ` [PATCH v4 07/17] x86/VPMU: Add public xenpmu.h Boris Ostrovsky
2014-01-24 14:54   ` Jan Beulich
2014-01-24 16:49     ` Boris Ostrovsky
2014-01-24 16:57       ` Jan Beulich
2014-01-21 19:08 ` [PATCH v4 08/17] x86/VPMU: Make vpmu not HVM-specific Boris Ostrovsky
2014-01-24 14:59   ` Jan Beulich
2014-01-21 19:08 ` [PATCH v4 09/17] x86/VPMU: Interface for setting PMU mode and flags Boris Ostrovsky
2014-01-24 15:10   ` Jan Beulich
2014-01-24 17:13     ` Boris Ostrovsky
2014-01-27  8:34       ` Jan Beulich
2014-01-27 15:20         ` Boris Ostrovsky
2014-01-27 15:29           ` Jan Beulich
2014-01-21 19:08 ` [PATCH v4 10/17] x86/VPMU: Initialize PMU for PV guests Boris Ostrovsky
2014-01-31 16:58   ` Jan Beulich
2014-01-21 19:08 ` [PATCH v4 11/17] x86/VPMU: Add support for PMU register handling on " Boris Ostrovsky
2014-02-04 11:14   ` Jan Beulich
2014-02-04 15:07     ` Boris Ostrovsky
2014-01-21 19:08 ` [PATCH v4 12/17] x86/VPMU: Handle PMU interrupts for " Boris Ostrovsky
2014-02-04 11:22   ` Jan Beulich
2014-02-04 15:26     ` Boris Ostrovsky
2014-02-04 15:50       ` Jan Beulich
2014-01-21 19:08 ` Boris Ostrovsky [this message]
2014-02-04 11:31   ` [PATCH v4 13/17] x86/VPMU: Add privileged PMU mode Jan Beulich
2014-02-04 15:53     ` Boris Ostrovsky
2014-02-04 16:01       ` Jan Beulich
2014-02-04 16:13         ` Boris Ostrovsky
2014-02-04 16:39           ` Jan Beulich
2014-01-21 19:08 ` [PATCH v4 14/17] x86/VPMU: Save VPMU state for PV guests during context switch Boris Ostrovsky
2014-02-04 11:38   ` Jan Beulich
2014-02-04 15:56     ` Boris Ostrovsky
2014-01-21 19:09 ` [PATCH v4 15/17] x86/VPMU: NMI-based VPMU support Boris Ostrovsky
2014-02-04 11:48   ` Jan Beulich
2014-02-04 16:31     ` Boris Ostrovsky
2014-02-04 16:41       ` Jan Beulich
2014-02-04 16:50         ` Boris Ostrovsky
2014-01-21 19:09 ` [PATCH v4 16/17] x86/VPMU: Suport for PVH guests Boris Ostrovsky
2014-02-04 11:51   ` Jan Beulich
2014-02-04 16:44     ` Boris Ostrovsky
2014-01-21 19:09 ` [PATCH v4 17/17] x86/VPMU: Move VPMU files up from hvm/ directory Boris Ostrovsky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1390331342-3967-14-git-send-email-boris.ostrovsky@oracle.com \
    --to=boris.ostrovsky@oracle.com \
    --cc=JBeulich@suse.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=dietmar.hahn@ts.fujitsu.com \
    --cc=eddie.dong@intel.com \
    --cc=jun.nakajima@intel.com \
    --cc=keir@xen.org \
    --cc=suravee.suthikulpanit@amd.com \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).