xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
To: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: jun.nakajima@intel.com, George.Dunlap@eu.citrix.com,
	jacob.shin@amd.com, eddie.dong@intel.com,
	dietmar.hahn@ts.fujitsu.com, xen-devel@lists.xen.org,
	JBeulich@suse.com, suravee.suthikulpanit@amd.com
Subject: Re: [PATCH v2 05/13] intel/VPMU: Clean up Intel VPMU code
Date: Mon, 23 Sep 2013 15:46:47 -0400	[thread overview]
Message-ID: <20130923194647.GB3019@phenom.dumpdata.com> (raw)
In-Reply-To: <1379670132-1748-6-git-send-email-boris.ostrovsky@oracle.com>

On Fri, Sep 20, 2013 at 05:42:04AM -0400, Boris Ostrovsky wrote:
> Remove struct pmumsr and core2_pmu_enable. Replace static MSR structures with
> fields in core2_vpmu_context.
> 
> Call core2_get_pmc_count() once, during initialization.
> 
> Properly clean up when core2_vpmu_alloc_resource() fails and add routines
> to remove MSRs from VMCS.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> ---
>  xen/arch/x86/hvm/vmx/vmcs.c              |  59 +++++++
>  xen/arch/x86/hvm/vmx/vpmu_core2.c        | 289 ++++++++++++++-----------------
>  xen/include/asm-x86/hvm/vmx/vmcs.h       |   2 +
>  xen/include/asm-x86/hvm/vmx/vpmu_core2.h |  19 --
>  4 files changed, 191 insertions(+), 178 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index de9f592..756bc13 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -1136,6 +1136,36 @@ int vmx_add_guest_msr(u32 msr)
>      return 0;
>  }
>  
> +void vmx_rm_guest_msr(u32 msr)
> +{
> +    struct vcpu *curr = current;
> +    unsigned int i, idx, msr_count = curr->arch.hvm_vmx.msr_count;
> +    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
> +
> +    if ( msr_area == NULL )
> +        return;
> +    
> +    for ( idx = 0; idx < msr_count; idx++ )
> +        if ( msr_area[idx].index == msr )
> +            break;
> +
> +    if ( idx == msr_count )
> +        return;
> +    
> +    for ( i = idx; i < msr_count - 1; i++ )
> +    {
> +        msr_area[i].index = msr_area[i + 1].index;
> +        rdmsrl(msr_area[i].index, msr_area[i].data);
> +    }
> +    msr_area[msr_count - 1].index = 0;
> +
> +    curr->arch.hvm_vmx.msr_count = --msr_count;
> +    __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
> +    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
> +
> +    return;
> +}
> +
>  int vmx_add_host_load_msr(u32 msr)
>  {
>      struct vcpu *curr = current;
> @@ -1166,6 +1196,35 @@ int vmx_add_host_load_msr(u32 msr)
>      return 0;
>  }
>  
> +void vmx_rm_host_load_msr(u32 msr)
> +{
> +    struct vcpu *curr = current;
> +    unsigned int i, idx,  msr_count = curr->arch.hvm_vmx.host_msr_count;
> +    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
> +
> +    if ( msr_area == NULL )
> +        return;
> +
> +    for ( idx = 0; idx < msr_count; idx++ )
> +        if ( msr_area[idx].index == msr )
> +            break;
> +
> +    if ( idx == msr_count )
> +        return;
> +    
> +    for ( i = idx; i < msr_count - 1; i++ )
> +    {
> +        msr_area[i].index = msr_area[i + 1].index;
> +        rdmsrl(msr_area[i].index, msr_area[i].data);
> +    }
> +    msr_area[msr_count - 1].index = 0;    
> +
> +    curr->arch.hvm_vmx.host_msr_count = --msr_count;
> +    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
> +
> +    return;
> +}
> +
>  void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector)
>  {
>      int index, offset, changed;
> diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> index 101888d..50f784f 100644
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -65,6 +65,26 @@
>  #define PMU_FIXED_WIDTH_MASK     (((1 << PMU_FIXED_WIDTH_BITS) -1) << PMU_FIXED_WIDTH_SHIFT)
>  
>  /*
> + * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
> + * counters. 4 bits for every counter.
> + */
> +#define FIXED_CTR_CTRL_BITS 4
> +#define FIXED_CTR_CTRL_MASK ((1 << FIXED_CTR_CTRL_BITS) - 1)
> +
> +#define VPMU_CORE2_MAX_FIXED_PMCS     4
> +struct core2_vpmu_context {
> +    u64 fixed_ctrl;
> +    u64 ds_area;
> +    u64 pebs_enable;
> +    u64 global_ovf_status;
> +    u64 fix_counters[VPMU_CORE2_MAX_FIXED_PMCS];
> +    struct arch_msr_pair arch_msr_pair[1];
> +};
> +
> +static int arch_pmc_cnt; /* Number of general-purpose performance counters */
> +static int fixed_pmc_cnt; /* Number of fixed performance counters */
> +
> +/*
>   * QUIRK to workaround an issue on various family 6 cpus.
>   * The issue leads to endless PMC interrupt loops on the processor.
>   * If the interrupt handler is running and a pmc reaches the value 0, this
> @@ -84,11 +104,8 @@ static void check_pmc_quirk(void)
>          is_pmc_quirk = 0;    
>  }
>  
> -static int core2_get_pmc_count(void);
>  static void handle_pmc_quirk(u64 msr_content)
>  {
> -    int num_gen_pmc = core2_get_pmc_count();
> -    int num_fix_pmc  = 3;
>      int i;
>      u64 val;
>  
> @@ -96,7 +113,7 @@ static void handle_pmc_quirk(u64 msr_content)
>          return;
>  
>      val = msr_content;
> -    for ( i = 0; i < num_gen_pmc; i++ )
> +    for ( i = 0; i < arch_pmc_cnt; i++ )
>      {
>          if ( val & 0x1 )
>          {
> @@ -108,7 +125,7 @@ static void handle_pmc_quirk(u64 msr_content)
>          val >>= 1;
>      }
>      val = msr_content >> 32;
> -    for ( i = 0; i < num_fix_pmc; i++ )
> +    for ( i = 0; i < fixed_pmc_cnt; i++ )
>      {
>          if ( val & 0x1 )
>          {
> @@ -121,65 +138,32 @@ static void handle_pmc_quirk(u64 msr_content)
>      }
>  }
>  
> -static const u32 core2_fix_counters_msr[] = {
> -    MSR_CORE_PERF_FIXED_CTR0,
> -    MSR_CORE_PERF_FIXED_CTR1,
> -    MSR_CORE_PERF_FIXED_CTR2
> -};
> -
>  /*
> - * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
> - * counters. 4 bits for every counter.
> + * Read the number of general counters via CPUID.EAX[0xa].EAX[8..15]
>   */
> -#define FIXED_CTR_CTRL_BITS 4
> -#define FIXED_CTR_CTRL_MASK ((1 << FIXED_CTR_CTRL_BITS) - 1)
> -
> -/* The index into the core2_ctrls_msr[] of this MSR used in core2_vpmu_dump() */
> -#define MSR_CORE_PERF_FIXED_CTR_CTRL_IDX 0
> -
> -/* Core 2 Non-architectual Performance Control MSRs. */
> -static const u32 core2_ctrls_msr[] = {
> -    MSR_CORE_PERF_FIXED_CTR_CTRL,
> -    MSR_IA32_PEBS_ENABLE,
> -    MSR_IA32_DS_AREA
> -};
> -
> -struct pmumsr {
> -    unsigned int num;
> -    const u32 *msr;
> -};
> -
> -static const struct pmumsr core2_fix_counters = {
> -    VPMU_CORE2_NUM_FIXED,
> -    core2_fix_counters_msr
> -};
> +static int core2_get_arch_pmc_count(void)
> +{
> +    u32 eax, ebx, ecx, edx;
>  
> -static const struct pmumsr core2_ctrls = {
> -    VPMU_CORE2_NUM_CTRLS,
> -    core2_ctrls_msr
> -};
> -static int arch_pmc_cnt;
> +    cpuid(0xa, &eax, &ebx, &ecx, &edx);

Could you use cpuid_eax?

> +    return ( (eax & PMU_GENERAL_NR_MASK) >> PMU_GENERAL_NR_SHIFT );
> +}
>  
>  /*
> - * Read the number of general counters via CPUID.EAX[0xa].EAX[8..15]
> + * Read the number of fixed counters via CPUID.EDX[0xa].EDX[0..4]
>   */
> -static int core2_get_pmc_count(void)
> +static int core2_get_fixed_pmc_count(void)
>  {
>      u32 eax, ebx, ecx, edx;
>  
> -    if ( arch_pmc_cnt == 0 )
> -    {
> -        cpuid(0xa, &eax, &ebx, &ecx, &edx);
> -        arch_pmc_cnt = (eax & PMU_GENERAL_NR_MASK) >> PMU_GENERAL_NR_SHIFT;
> -    }
> -
> -    return arch_pmc_cnt;
> +    cpuid(0xa, &eax, &ebx, &ecx, &edx);

Ditto here.

  parent reply	other threads:[~2013-09-23 19:46 UTC|newest]

Thread overview: 51+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-09-20  9:41 [PATCH v2 00/13] x86/PMU: Xen PMU PV support Boris Ostrovsky
2013-09-20  9:42 ` [PATCH v2 01/13] Export hypervisor symbols Boris Ostrovsky
2013-09-23 19:42   ` Konrad Rzeszutek Wilk
2013-09-23 20:06     ` Boris Ostrovsky
2013-09-24 17:40       ` Konrad Rzeszutek Wilk
2013-09-25 13:15   ` Jan Beulich
2013-09-25 14:03     ` Boris Ostrovsky
2013-09-25 14:53       ` Jan Beulich
2013-09-20  9:42 ` [PATCH v2 02/13] Set VCPU's is_running flag closer to when the VCPU is dispatched Boris Ostrovsky
2013-09-25 13:42   ` Jan Beulich
2013-09-25 14:08     ` Keir Fraser
2013-09-20  9:42 ` [PATCH v2 03/13] x86/PMU: Stop AMD counters when called from vpmu_save_force() Boris Ostrovsky
2013-09-20  9:42 ` [PATCH v2 04/13] x86/VPMU: Minor VPMU cleanup Boris Ostrovsky
2013-09-20  9:42 ` [PATCH v2 05/13] intel/VPMU: Clean up Intel VPMU code Boris Ostrovsky
2013-09-23 11:42   ` Dietmar Hahn
2013-09-23 19:46   ` Konrad Rzeszutek Wilk [this message]
2013-09-25 13:55   ` Jan Beulich
2013-09-25 14:39     ` Boris Ostrovsky
2013-09-25 14:57       ` Jan Beulich
2013-09-25 15:37         ` Boris Ostrovsky
2013-09-20  9:42 ` [PATCH v2 06/13] x86/PMU: Add public xenpmu.h Boris Ostrovsky
2013-09-23 13:04   ` Dietmar Hahn
2013-09-23 13:16     ` Jan Beulich
2013-09-23 14:00       ` Boris Ostrovsky
2013-09-23 13:45     ` Boris Ostrovsky
2013-09-25 14:04   ` Jan Beulich
2013-09-25 15:59     ` Boris Ostrovsky
2013-09-25 16:08       ` Jan Beulich
2013-09-30 13:25     ` Boris Ostrovsky
2013-09-30 13:30       ` Jan Beulich
2013-09-30 13:55         ` Boris Ostrovsky
2013-09-20  9:42 ` [PATCH v2 07/13] x86/PMU: Make vpmu not HVM-specific Boris Ostrovsky
2013-09-25 14:05   ` Jan Beulich
2013-09-25 14:49     ` Boris Ostrovsky
2013-09-25 14:57       ` Jan Beulich
2013-09-20  9:42 ` [PATCH v2 08/13] x86/PMU: Interface for setting PMU mode and flags Boris Ostrovsky
2013-09-25 14:11   ` Jan Beulich
2013-09-25 14:55     ` Boris Ostrovsky
2013-09-20  9:42 ` [PATCH v2 09/13] x86/PMU: Initialize PMU for PV guests Boris Ostrovsky
2013-09-20  9:42 ` [PATCH v2 10/13] x86/PMU: Add support for PMU registes handling on " Boris Ostrovsky
2013-09-23 13:50   ` Dietmar Hahn
2013-09-25 14:23   ` Jan Beulich
2013-09-25 15:03     ` Boris Ostrovsky
2013-09-20  9:42 ` [PATCH v2 11/13] x86/PMU: Handle PMU interrupts for " Boris Ostrovsky
2013-09-25 14:33   ` Jan Beulich
2013-09-25 14:40     ` Andrew Cooper
2013-09-25 15:52       ` Boris Ostrovsky
2013-09-25 15:19     ` Boris Ostrovsky
2013-09-25 15:25       ` Jan Beulich
2013-09-20  9:42 ` [PATCH v2 12/13] x86/PMU: Save VPMU state for PV guests during context switch Boris Ostrovsky
2013-09-20  9:42 ` [PATCH v2 13/13] x86/PMU: Move vpmu files up from hvm directory Boris Ostrovsky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20130923194647.GB3019@phenom.dumpdata.com \
    --to=konrad.wilk@oracle.com \
    --cc=George.Dunlap@eu.citrix.com \
    --cc=JBeulich@suse.com \
    --cc=boris.ostrovsky@oracle.com \
    --cc=dietmar.hahn@ts.fujitsu.com \
    --cc=eddie.dong@intel.com \
    --cc=jacob.shin@amd.com \
    --cc=jun.nakajima@intel.com \
    --cc=suravee.suthikulpanit@amd.com \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).