From mboxrd@z Thu Jan 1 00:00:00 1970
From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Subject: [PATCH v7 18/19] x86/VPMU: NMI-based VPMU support
Date: Fri, 6 Jun 2014 13:40:14 -0400
Message-ID: <1402076415-26475-19-git-send-email-boris.ostrovsky@oracle.com>
References: <1402076415-26475-1-git-send-email-boris.ostrovsky@oracle.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
In-Reply-To: <1402076415-26475-1-git-send-email-boris.ostrovsky@oracle.com>
Sender: xen-devel-bounces@lists.xen.org
Errors-To: xen-devel-bounces@lists.xen.org
To: JBeulich@suse.com, kevin.tian@intel.com, dietmar.hahn@ts.fujitsu.com,
	suravee.suthikulpanit@amd.com
Cc: tim@xen.org, boris.ostrovsky@oracle.com, keir@xen.org,
	jun.nakajima@intel.com, xen-devel@lists.xen.org
List-Id: xen-devel@lists.xenproject.org

Add support for using NMIs as PMU interrupts. Most of the processing is
still performed by vpmu_do_interrupt(). However, since certain operations
are not NMI-safe we defer them to a softint that vpmu_do_interrupt() will
schedule:
* For PV guests that would be send_guest_vcpu_virq()
* For HVM guests it's VLAPIC accesses and hvm_get_segment_register()
  (the latter can be called in privileged profiling mode when the
  interrupted guest is an HVM one).

With send_guest_vcpu_virq() and hvm_get_segment_register() for PV(H) and
vlapic accesses for HVM moved to softint, the only routines/macros that
vpmu_do_interrupt() calls in NMI mode are:
* memcpy()
* querying domain type (is_XX_domain())
* guest_cpu_user_regs()
* XLAT_cpu_user_regs()
* raise_softirq()
* vcpu_vpmu()
* vpmu_ops->arch_vpmu_save()
* vpmu_ops->do_interrupt() (in the future, for PVH support)

The latter two only access PMU MSRs with {rd,wr}msrl() (not the _safe
variants, which would not be NMI-safe).
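In outline, the hand-off looks like this (a condensed sketch of the flow
added by this patch; the fragments are lifted from the diff below and are
not a substitute for it):

    /* NMI context: vpmu_do_interrupt() limits itself to the NMI-safe
     * calls listed above and defers the rest to softirq context: */
    if ( vpmu_interrupt_type & APIC_DM_NMI )
    {
        per_cpu(sampled_vcpu, smp_processor_id()) = current;
        raise_softirq(PMU_SOFTIRQ);
    }
    else
        send_guest_vcpu_virq(v, VIRQ_XENPMU);  /* vector mode: safe here */

    /* Softirq context: pmu_softnmi() then completes the NMI-unsafe part --
     * send_guest_vcpu_virq() for PV(H), and vlapic access plus
     * hvm_get_segment_register() for HVM. */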
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Tested-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
---
 xen/arch/x86/hvm/svm/vpmu.c       |   1 +
 xen/arch/x86/hvm/vmx/vpmu_core2.c |   1 +
 xen/arch/x86/hvm/vpmu.c           | 188 +++++++++++++++++++++++++++++++-------
 xen/include/xen/softirq.h         |   1 +
 4 files changed, 158 insertions(+), 33 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 96833ea..06dfc01 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -220,6 +220,7 @@ static inline void context_save(struct vcpu *v)
         rdmsrl(counters[i], counter_regs[i]);
 }
 
+/* Must be NMI-safe */
 static int amd_vpmu_save(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 3b56d3d..ac4998b 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -303,6 +303,7 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
 }
 
+/* Must be NMI-safe */
 static int core2_vpmu_save(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 8f98182..c6280dc 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include
 #include
 
 /*
@@ -48,34 +49,60 @@ uint64_t __read_mostly vpmu_features = 0;
 static void parse_vpmu_param(char *s);
 custom_param("vpmu", parse_vpmu_param);
 
+static void pmu_softnmi(void);
+
 static DEFINE_PER_CPU(struct vcpu *, last_vcpu);
+static DEFINE_PER_CPU(struct vcpu *, sampled_vcpu);
+
+static uint32_t __read_mostly vpmu_interrupt_type = PMU_APIC_VECTOR;
 
 static void __init parse_vpmu_param(char *s)
 {
-    switch ( parse_bool(s) )
-    {
-    case 0:
-        break;
-    default:
-        if ( !strcmp(s, "bts") )
-            vpmu_features |= XENPMU_FEATURE_INTEL_BTS;
-        else if ( *s )
+    char *ss;
+
+    vpmu_mode = XENPMU_MODE_ON;
+    if ( *s == '\0' )
+        return;
+
+    do {
+        ss = strchr(s, ',');
+        if ( ss )
+            *ss = '\0';
+
+        switch ( parse_bool(s) )
         {
-            printk("VPMU: unknown flag: %s - vpmu disabled!\n", s);
+        case 0:
+            vpmu_mode = XENPMU_MODE_OFF;
+            return;
+        case -1:
+            if ( !strcmp(s, "nmi") )
+                vpmu_interrupt_type = APIC_DM_NMI;
+            else if ( !strcmp(s, "bts") )
+                vpmu_features |= XENPMU_FEATURE_INTEL_BTS;
+            else if ( !strcmp(s, "priv") )
+            {
+                vpmu_mode &= ~XENPMU_MODE_ON;
+                vpmu_mode |= XENPMU_MODE_PRIV;
+            }
+            else
+            {
+                printk("VPMU: unknown flag: %s - vpmu disabled!\n", s);
+                vpmu_mode = XENPMU_MODE_OFF;
+                return;
+            }
+        default:
+            break;
         }
-        /* fall through */
-    case 1:
-        vpmu_mode = XENPMU_MODE_ON;
-        break;
-    }
+
+        s = ss + 1;
+    } while ( ss );
 }
 
 void vpmu_lvtpc_update(uint32_t val)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(current);
 
-    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | (val & APIC_LVT_MASKED);
+    vpmu->hw_lapic_lvtpc = vpmu_interrupt_type | (val & APIC_LVT_MASKED);
 
     /* Postpone APIC updates for PV(H) guests if PMU interrupt is pending */
     if ( is_hvm_domain(current->domain) ||
@@ -84,6 +111,24 @@ void vpmu_lvtpc_update(uint32_t val)
         apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
 }
 
+static void vpmu_send_interrupt(struct vcpu *v)
+{
+    struct vlapic *vlapic;
+    u32 vlapic_lvtpc;
+
+    ASSERT( is_hvm_vcpu(v) );
+
+    vlapic = vcpu_vlapic(v);
+    if ( !is_vlapic_lvtpc_enabled(vlapic) )
+        return;
+
+    vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
+    if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
+        vlapic_set_irq(vcpu_vlapic(v), vlapic_lvtpc & APIC_VECTOR_MASK, 0);
+    else
+        v->nmi_pending = 1;
+}
+
 int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, uint8_t rw)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(current);
@@ -142,6 +187,7 @@ static struct vcpu *choose_dom0_vcpu(void)
     return v;
 }
 
+/* This routine may be called in NMI context */
 int vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
@@ -219,8 +265,9 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
             if ( current->arch.flags & TF_kernel_mode )
                 v->arch.vpmu.xenpmu_data->pmu.r.regs.cs &= ~3;
         }
-        else
+        else if ( !(vpmu_interrupt_type & APIC_DM_NMI) )
         {
+            /* Unsafe in NMI context, defer to softint later */
             struct segment_register seg_cs;
 
             hvm_get_segment_register(current, x86_seg_cs, &seg_cs);
@@ -237,8 +284,12 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
             memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
                    gregs, sizeof(struct cpu_user_regs));
 
-            hvm_get_segment_register(current, x86_seg_cs, &seg_cs);
-            v->arch.vpmu.xenpmu_data->pmu.r.regs.cs = seg_cs.sel;
+            /* This is unsafe in NMI context, we'll do it in softint handler */
+            if ( !(vpmu_interrupt_type & APIC_DM_NMI) )
+            {
+                hvm_get_segment_register(current, x86_seg_cs, &seg_cs);
+                v->arch.vpmu.xenpmu_data->pmu.r.regs.cs = seg_cs.sel;
+            }
         }
 
         v->arch.vpmu.xenpmu_data->domain_id = current->domain->domain_id;
@@ -249,30 +300,30 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
         apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc | APIC_LVT_MASKED);
         vpmu->hw_lapic_lvtpc |= APIC_LVT_MASKED;
 
-        send_guest_vcpu_virq(v, VIRQ_XENPMU);
+        if ( vpmu_interrupt_type & APIC_DM_NMI )
+        {
+            per_cpu(sampled_vcpu, smp_processor_id()) = current;
+            raise_softirq(PMU_SOFTIRQ);
+        }
+        else
+            send_guest_vcpu_virq(v, VIRQ_XENPMU);
 
         return 1;
     }
 
     if ( vpmu->arch_vpmu_ops )
     {
-        struct vlapic *vlapic = vcpu_vlapic(v);
-        u32 vlapic_lvtpc;
-        unsigned char int_vec;
-
         if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) )
             return 0;
 
-        if ( !is_vlapic_lvtpc_enabled(vlapic) )
-            return 1;
-
-        vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
-        int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
-
-        if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
-            vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
+        if ( vpmu_interrupt_type & APIC_DM_NMI )
+        {
+            per_cpu(sampled_vcpu, smp_processor_id()) = current;
+            raise_softirq(PMU_SOFTIRQ);
+        }
         else
-            v->nmi_pending = 1;
+            vpmu_send_interrupt(v);
+
         return 1;
     }
 
@@ -303,6 +354,8 @@ static void vpmu_save_force(void *arg)
     vpmu_reset(vpmu, VPMU_CONTEXT_SAVE);
 
     per_cpu(last_vcpu, smp_processor_id()) = NULL;
+
+    pmu_softnmi();
 }
 
 void vpmu_save(struct vcpu *v)
@@ -320,7 +373,10 @@ void vpmu_save(struct vcpu *v)
         if ( vpmu->arch_vpmu_ops->arch_vpmu_save(v) )
             vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
 
-    apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
+    apic_write(APIC_LVTPC, vpmu_interrupt_type | APIC_LVT_MASKED);
+
+    /* Make sure there are no outstanding PMU NMIs */
+    pmu_softnmi();
 }
 
 void vpmu_load(struct vcpu *v)
@@ -365,6 +421,8 @@ void vpmu_load(struct vcpu *v)
         vpmu_save_force(prev);
         vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
 
+        pmu_softnmi();
+
         vpmu = vcpu_vpmu(v);
     }
 
@@ -474,11 +532,55 @@ static void vpmu_unload_all(void)
     }
 }
 
+/* Process the softirq set by PMU NMI handler */
+static void pmu_softnmi(void)
+{
+    struct vcpu *v, *sampled = this_cpu(sampled_vcpu);
+
+    if ( sampled == NULL )
+        return;
+    this_cpu(sampled_vcpu) = NULL;
+
+    if ( (vpmu_mode & XENPMU_MODE_PRIV) ||
+         (sampled->domain->domain_id >= DOMID_FIRST_RESERVED) )
+    {
+        v = choose_dom0_vcpu();
+        if ( !v )
+            return;
+    }
+    else
+    {
+        if ( is_hvm_domain(sampled->domain) )
+        {
+            vpmu_send_interrupt(sampled);
+            return;
+        }
+        v = sampled;
+    }
+
+    if ( has_hvm_container_domain(sampled->domain) )
+    {
+        struct segment_register seg_cs;
+
+        hvm_get_segment_register(sampled, x86_seg_cs, &seg_cs);
+        v->arch.vpmu.xenpmu_data->pmu.r.regs.cs = seg_cs.sel;
+    }
+
+    send_guest_vcpu_virq(v, VIRQ_XENPMU);
+}
+
+int pmu_nmi_interrupt(struct cpu_user_regs *regs, int cpu)
+{
+    return vpmu_do_interrupt(regs);
+}
+
 static int pvpmu_init(struct domain *d, xen_pmu_params_t *params)
 {
     struct vcpu *v;
     struct page_info *page;
     uint64_t gfn = params->d.val;
+    static bool_t __read_mostly pvpmu_initted = 0;
+    static DEFINE_SPINLOCK(init_lock);
 
     if ( !params || params->vcpu >= d->max_vcpus )
         return -EINVAL;
@@ -501,6 +603,26 @@ static int pvpmu_init(struct domain *d, xen_pmu_params_t *params)
         return -EINVAL;
     }
 
+    spin_lock(&init_lock);
+
+    if ( !pvpmu_initted )
+    {
+        if ( reserve_lapic_nmi() == 0 )
+            set_nmi_callback(pmu_nmi_interrupt);
+        else
+        {
+            spin_unlock(&init_lock);
+            printk("Failed to reserve PMU NMI\n");
+            put_page(page);
+            return -EBUSY;
+        }
+        open_softirq(PMU_SOFTIRQ, pmu_softnmi);
+
+        pvpmu_initted = 1;
+    }
+
+    spin_unlock(&init_lock);
+
     vpmu_initialise(v);
 
     return 0;
diff --git a/xen/include/xen/softirq.h b/xen/include/xen/softirq.h
index 0c0d481..5829fa4 100644
--- a/xen/include/xen/softirq.h
+++ b/xen/include/xen/softirq.h
@@ -8,6 +8,7 @@ enum {
     NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ,
     RCU_SOFTIRQ,
     TASKLET_SOFTIRQ,
+    PMU_SOFTIRQ,
    NR_COMMON_SOFTIRQS
 };
 
-- 
1.8.1.4
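Note: with parse_vpmu_param() above, the "vpmu" boot option now takes a
comma-separated flag list. Illustrative examples, derived from the parser
in this patch rather than from separate documentation:

    vpmu=1         VPMU on, interrupts via the regular PMU_APIC_VECTOR
    vpmu=nmi       VPMU on, PMU interrupts delivered as NMIs
    vpmu=bts,nmi   Intel BTS plus NMI delivery
    vpmu=priv      privileged profiling mode (samples handled by dom0)
    vpmu=0         VPMU off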