From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: JBeulich@suse.com, kevin.tian@intel.com,
dietmar.hahn@ts.fujitsu.com, suravee.suthikulpanit@amd.com
Cc: keir@xen.org, andrew.cooper3@citrix.com,
donald.d.dugger@intel.com, xen-devel@lists.xen.org,
jun.nakajima@intel.com, boris.ostrovsky@oracle.com
Subject: [PATCH v6 14/19] x86/VPMU: Merge vpmu_rdmsr and vpmu_wrmsr
Date: Tue, 13 May 2014 11:53:28 -0400
Message-ID: <1399996413-1998-15-git-send-email-boris.ostrovsky@oracle.com>
In-Reply-To: <1399996413-1998-1-git-send-email-boris.ostrovsky@oracle.com>
vpmu_do_wrmsr() and vpmu_do_rdmsr() share most of their logic, so merge them
into a single vpmu_do_msr() that takes the access type as an extra argument.
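
For illustration only (not part of the patch), the merged calling convention
that the hunks below implement is roughly:

    uint64_t val;

    /* Read: the vendor's do_rdmsr hook fills the value through the pointer. */
    vpmu_do_msr(msr, &val, VPMU_MSR_READ);

    /* Write: callers copy msr_content into a local and pass its address,
       since the merged routine takes a uint64_t pointer in both directions. */
    val = msr_content;
    vpmu_do_msr(msr, &val, VPMU_MSR_WRITE);
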
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
xen/arch/x86/hvm/svm/svm.c | 9 ++++++---
xen/arch/x86/hvm/vmx/vmx.c | 11 +++++++----
xen/arch/x86/hvm/vpmu.c | 42 +++++++++++++++---------------------------
xen/arch/x86/traps.c | 4 ++--
xen/include/asm-x86/hvm/vpmu.h | 6 ++++--
5 files changed, 34 insertions(+), 38 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c23db32..3d652c2 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1632,7 +1632,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
case MSR_AMD_FAM15H_EVNTSEL3:
case MSR_AMD_FAM15H_EVNTSEL4:
case MSR_AMD_FAM15H_EVNTSEL5:
- vpmu_do_rdmsr(msr, msr_content);
+ vpmu_do_msr(msr, msr_content, VPMU_MSR_READ);
break;
case MSR_AMD64_DR0_ADDRESS_MASK:
@@ -1783,9 +1783,12 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
case MSR_AMD_FAM15H_EVNTSEL3:
case MSR_AMD_FAM15H_EVNTSEL4:
case MSR_AMD_FAM15H_EVNTSEL5:
- vpmu_do_wrmsr(msr, msr_content);
- break;
+ {
+ uint64_t msr_val = msr_content;
+ vpmu_do_msr(msr, &msr_val, VPMU_MSR_WRITE);
+ break;
+ }
case MSR_IA32_MCx_MISC(4): /* Threshold register */
case MSR_F10_MC4_MISC1 ... MSR_F10_MC4_MISC3:
/*
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 1c9e742..8588f48 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2047,11 +2047,11 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
*msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
/* Perhaps vpmu will change some bits. */
- if ( vpmu_do_rdmsr(msr, msr_content) )
+ if ( vpmu_do_msr(msr, msr_content, VPMU_MSR_READ) )
goto done;
break;
default:
- if ( vpmu_do_rdmsr(msr, msr_content) )
+ if ( vpmu_do_msr(msr, msr_content, VPMU_MSR_READ) )
break;
if ( passive_domain_do_rdmsr(msr, msr_content) )
goto done;
@@ -2202,6 +2202,7 @@ void vmx_vlapic_msr_changed(struct vcpu *v)
static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
{
struct vcpu *v = current;
+ uint64_t msr_val;
HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%#x, msr_value=%#"PRIx64, msr, msr_content);
@@ -2225,7 +2226,8 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
if ( msr_content & ~supported )
{
/* Perhaps some other bits are supported in vpmu. */
- if ( !vpmu_do_wrmsr(msr, msr_content) )
+ msr_val = msr_content;
+ if ( !vpmu_do_msr(msr, &msr_val, VPMU_MSR_WRITE) )
break;
}
if ( msr_content & IA32_DEBUGCTLMSR_LBR )
@@ -2256,7 +2258,8 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
goto gp_fault;
break;
default:
- if ( vpmu_do_wrmsr(msr, msr_content) )
+ msr_val = msr_content;
+ if ( vpmu_do_msr(msr, &msr_val, VPMU_MSR_WRITE) )
return X86EMUL_OKAY;
if ( passive_domain_do_wrmsr(msr, msr_content) )
return X86EMUL_OKAY;
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 9995728..896e2be 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -84,20 +84,29 @@ void vpmu_lvtpc_update(uint32_t val)
apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
}
-int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, uint8_t rw)
{
struct vpmu_struct *vpmu = vcpu_vpmu(current);
if ( !(vpmu_mode & XENPMU_MODE_ON) )
return 0;
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr )
+ ASSERT((rw == VPMU_MSR_READ) || (rw == VPMU_MSR_WRITE));
+
+ if ( vpmu->arch_vpmu_ops )
{
- int ret = vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content);
+ int ret;
+
+ if ( (rw == VPMU_MSR_READ) && vpmu->arch_vpmu_ops->do_rdmsr )
+ ret = vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
+ else if ( vpmu->arch_vpmu_ops->do_wrmsr )
+ ret = vpmu->arch_vpmu_ops->do_wrmsr(msr, *msr_content);
+ else
+ return 0;
/*
- * We may have received a PMU interrupt during WRMSR handling
- * and since do_wrmsr may load VPMU context we should save
+ * We may have received a PMU interrupt while handling MSR access
+ * and since do_wr/rdmsr may load VPMU context we should save
* (and unload) it again.
*/
if ( !is_hvm_domain(current->domain) &&
@@ -107,31 +116,10 @@ int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
vpmu->arch_vpmu_ops->arch_vpmu_save(current);
vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
}
- return ret;
- }
- return 0;
-}
-
-int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
-{
- struct vpmu_struct *vpmu = vcpu_vpmu(current);
- if ( !(vpmu_mode & XENPMU_MODE_ON) )
- return 0;
-
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr )
- {
- int ret = vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
-
- if ( !is_hvm_domain(current->domain) &&
- (current->arch.vpmu.xenpmu_data->pmu_flags & PMU_CACHED) )
- {
- vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
- vpmu->arch_vpmu_ops->arch_vpmu_save(current);
- vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
- }
return ret;
}
+
return 0;
}
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index a0d0ba7..adbdebe 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2525,7 +2525,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
- if ( !vpmu_do_wrmsr(regs->ecx, msr_content) )
+ if ( !vpmu_do_msr(regs->ecx, &msr_content, VPMU_MSR_WRITE) )
goto invalid;
break;
default:
@@ -2638,7 +2638,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
- if ( vpmu_do_rdmsr(regs->ecx, &msr_content) )
+ if ( vpmu_do_msr(regs->ecx, &msr_content, VPMU_MSR_READ) )
{
regs->eax = (uint32_t)msr_content;
regs->edx = (uint32_t)(msr_content >> 32);
diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
index 438a913..bab8779 100644
--- a/xen/include/asm-x86/hvm/vpmu.h
+++ b/xen/include/asm-x86/hvm/vpmu.h
@@ -78,9 +78,11 @@ struct vpmu_struct {
#define vpmu_is_set_all(_vpmu, _x) (((_vpmu)->flags & (_x)) == (_x))
#define vpmu_clear(_vpmu) ((_vpmu)->flags = 0)
+#define VPMU_MSR_READ 0
+#define VPMU_MSR_WRITE 1
+
void vpmu_lvtpc_update(uint32_t val);
-int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content);
-int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
+int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, uint8_t rw);
int vpmu_do_interrupt(struct cpu_user_regs *regs);
void vpmu_do_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
--
1.8.1.4