From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: jbeulich@suse.com, kevin.tian@intel.com, suravee.suthikulpanit@amd.com
Cc: keir@xen.org, andrew.cooper3@citrix.com, tim@xen.org,
xen-devel@lists.xen.org, jun.nakajima@intel.com,
boris.ostrovsky@oracle.com
Subject: [PATCH v9 16/20] x86/VPMU: Merge vpmu_rdmsr and vpmu_wrmsr
Date: Fri, 8 Aug 2014 12:55:42 -0400
Message-ID: <1407516946-17833-17-git-send-email-boris.ostrovsky@oracle.com>
In-Reply-To: <1407516946-17833-1-git-send-email-boris.ostrovsky@oracle.com>
vpmu_do_wrmsr() and vpmu_do_rdmsr() share most of their logic, so merge them into a single vpmu_do_msr() that takes the access type (VPMU_MSR_READ or VPMU_MSR_WRITE) as an argument.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
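For reference, below is a minimal, self-contained sketch of the read/write dispatch
pattern the merged handler uses. It is illustrative only: the ops structure layout,
the mock_* stubs and the do_msr() name are stand-ins invented for this example, not
the actual Xen definitions; only VPMU_MSR_READ/VPMU_MSR_WRITE mirror the constants
added by the patch.

#include <stdint.h>
#include <stdio.h>

#define VPMU_MSR_READ  0
#define VPMU_MSR_WRITE 1

/* Stand-in for the per-vendor ops table; the real one lives in vpmu.h. */
struct arch_vpmu_ops {
    int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
    int (*do_wrmsr)(unsigned int msr, uint64_t msr_content);
};

static int mock_rdmsr(unsigned int msr, uint64_t *val)
{
    (void)msr;
    *val = 0;          /* pretend the counter reads back as zero */
    return 0;
}

static int mock_wrmsr(unsigned int msr, uint64_t val)
{
    (void)msr; (void)val;
    return 0;          /* pretend the write was accepted */
}

static struct arch_vpmu_ops mock_ops = { mock_rdmsr, mock_wrmsr };

/*
 * Simplified version of the merged handler: dispatch on the access type
 * and fall back to "not handled" (0) when the vendor hook is absent.
 */
static int do_msr(struct arch_vpmu_ops *ops, unsigned int msr,
                  uint64_t *msr_content, int rw)
{
    if ( rw == VPMU_MSR_READ )
        return (ops && ops->do_rdmsr) ? ops->do_rdmsr(msr, msr_content) : 0;
    if ( rw == VPMU_MSR_WRITE )
        return (ops && ops->do_wrmsr) ? ops->do_wrmsr(msr, *msr_content) : 0;
    return 0;
}

int main(void)
{
    uint64_t val = 0x1234;
    unsigned int msr = 0xc0010200;   /* arbitrary example MSR index */

    /* Write path: callers now pass the value by pointer as well. */
    do_msr(&mock_ops, msr, &val, VPMU_MSR_WRITE);
    /* Read path: the result comes back through the same pointer. */
    do_msr(&mock_ops, msr, &val, VPMU_MSR_READ);
    printf("read back %#llx\n", (unsigned long long)val);
    return 0;
}

As in the patch, keeping a single prototype means writes also pass the value by
pointer, which is what lets svm.c, vmx.c and traps.c call one routine for both
access directions.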
 xen/arch/x86/hvm/svm/svm.c     |  4 +--
 xen/arch/x86/hvm/vmx/vmx.c     |  6 ++--
 xen/arch/x86/hvm/vpmu.c        | 66 +++++++++++++++++++-----------------------
 xen/arch/x86/traps.c           |  4 +--
 xen/include/asm-x86/hvm/vpmu.h |  6 ++--
 5 files changed, 40 insertions(+), 46 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index da5af5c..8935404 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1642,7 +1642,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
case MSR_AMD_FAM15H_EVNTSEL3:
case MSR_AMD_FAM15H_EVNTSEL4:
case MSR_AMD_FAM15H_EVNTSEL5:
- if ( vpmu_do_rdmsr(msr, msr_content) )
+ if ( vpmu_do_msr(msr, msr_content, VPMU_MSR_READ) )
goto gpf;
break;
@@ -1794,7 +1794,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
case MSR_AMD_FAM15H_EVNTSEL3:
case MSR_AMD_FAM15H_EVNTSEL4:
case MSR_AMD_FAM15H_EVNTSEL5:
- if ( vpmu_do_wrmsr(msr, msr_content) )
+ if ( vpmu_do_msr(msr, &msr_content, VPMU_MSR_WRITE) )
goto gpf;
break;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index c8dbe80..7c11550 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2085,7 +2085,7 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
case MSR_IA32_PEBS_ENABLE:
case MSR_IA32_DS_AREA:
- if ( vpmu_do_rdmsr(msr, msr_content) )
+ if ( vpmu_do_msr(msr, msr_content, VPMU_MSR_READ) )
goto gp_fault;
break;
default:
@@ -2261,7 +2261,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
if ( msr_content & ~supported )
{
/* Perhaps some other bits are supported in vpmu. */
- if ( vpmu_do_wrmsr(msr, msr_content) )
+ if ( vpmu_do_msr(msr, &msr_content, VPMU_MSR_WRITE) )
goto gp_fault;
}
if ( msr_content & IA32_DEBUGCTLMSR_LBR )
@@ -2297,7 +2297,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
case MSR_IA32_PEBS_ENABLE:
case MSR_IA32_DS_AREA:
- if ( vpmu_do_wrmsr(msr, msr_content) )
+ if ( vpmu_do_msr(msr, &msr_content, VPMU_MSR_WRITE) )
goto gp_fault;
break;
default:
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 818f721..c9cf6c0 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -91,57 +91,49 @@ void vpmu_lvtpc_update(uint32_t val)
apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
}
-int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, int rw)
{
struct vcpu *curr = current;
struct vpmu_struct *vpmu = vcpu_vpmu(curr);
+ struct arch_vpmu_ops *ops = vpmu->arch_vpmu_ops;
+ int ret = 0;
if ( !(vpmu_mode & XENPMU_MODE_SELF) )
return 0;
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr )
+ switch ( rw )
{
- int ret = vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content);
-
- /*
- * We may have received a PMU interrupt during WRMSR handling
- * and since do_wrmsr may load VPMU context we should save
- * (and unload) it again.
- */
- if ( !is_hvm_domain(curr->domain) &&
- vpmu->xenpmu_data && (vpmu->xenpmu_data->pmu_flags & PMU_CACHED) )
- {
- vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
- vpmu->arch_vpmu_ops->arch_vpmu_save(curr);
- vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
- }
- return ret;
- }
- return 0;
-}
+ case VPMU_MSR_READ:
+ if ( !ops || !ops->do_rdmsr )
+ return 0;
+ ret = ops->do_rdmsr(msr, msr_content);
+ break;
-int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
-{
- struct vcpu *curr = current;
- struct vpmu_struct *vpmu = vcpu_vpmu(curr);
+ case VPMU_MSR_WRITE:
+ if ( !ops || !ops->do_wrmsr )
+ return 0;
+ ret = ops->do_wrmsr(msr, *msr_content);
+ break;
- if ( !(vpmu_mode & XENPMU_MODE_SELF) )
+ default:
+ ASSERT(0);
return 0;
+ }
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr )
+ /*
+ * We may have received a PMU interrupt while handling MSR access
+ * and since do_wr/rdmsr may load VPMU context we should save
+ * (and unload) it again.
+ */
+ if ( !is_hvm_domain(curr->domain) &&
+ vpmu->xenpmu_data && (vpmu->xenpmu_data->pmu_flags & PMU_CACHED) )
{
- int ret = vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
-
- if ( !is_hvm_domain(curr->domain) &&
- vpmu->xenpmu_data && (vpmu->xenpmu_data->pmu_flags & PMU_CACHED) )
- {
- vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
- vpmu->arch_vpmu_ops->arch_vpmu_save(curr);
- vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
- }
- return ret;
+ vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
+ ops->arch_vpmu_save(curr);
+ vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
}
- return 0;
+
+ return ret;
}
static struct vcpu *choose_hwdom_vcpu(void)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index f1830d5..4c4292b 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2575,7 +2575,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
if ( vpmu_msr ||
((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !vpmu_msr) )
{
- if ( vpmu_do_wrmsr(regs->ecx, msr_content) )
+ if ( vpmu_do_msr(regs->ecx, &msr_content, VPMU_MSR_WRITE) )
goto fail;
break;
}
@@ -2698,7 +2698,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
if ( vpmu_msr ||
((boot_cpu_data.x86_vendor != X86_VENDOR_AMD) && !vpmu_msr) )
{
- if ( vpmu_do_rdmsr(regs->ecx, &msr_content) )
+ if ( vpmu_do_msr(regs->ecx, &msr_content, VPMU_MSR_READ) )
goto fail;
regs->eax = (uint32_t)msr_content;
diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
index 25954c6..429ab27 100644
--- a/xen/include/asm-x86/hvm/vpmu.h
+++ b/xen/include/asm-x86/hvm/vpmu.h
@@ -94,9 +94,11 @@ static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu,
return !!((vpmu->flags & mask) == mask);
}
+#define VPMU_MSR_READ 0
+#define VPMU_MSR_WRITE 1
+
void vpmu_lvtpc_update(uint32_t val);
-int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content);
-int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
+int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, int rw);
int vpmu_do_interrupt(struct cpu_user_regs *regs);
void vpmu_do_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
--
1.8.1.4