From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: JBeulich@suse.com, kevin.tian@intel.com,
suravee.suthikulpanit@amd.com, Aravind.Gopalakrishnan@amd.com,
dietmar.hahn@ts.fujitsu.com, dgdegra@tycho.nsa.gov,
andrew.cooper3@citrix.com
Cc: boris.ostrovsky@oracle.com, xen-devel@lists.xen.org
Subject: [PATCH v25 08/15] x86/VPMU: When handling MSR accesses, leave fault injection to callers
Date: Fri, 19 Jun 2015 14:44:39 -0400
Message-ID: <1434739486-1611-9-git-send-email-boris.ostrovsky@oracle.com>
In-Reply-To: <1434739486-1611-1-git-send-email-boris.ostrovsky@oracle.com>
The hypervisor cannot easily inject faults into PV guests from the
arch-specific VPMU MSR read/write handlers (unlike in the HVM case).

With this patch vpmu_do_msr() returns an error code indicating whether an
error was encountered during MSR processing, instead of merely reporting
that the access was to a VPMU register. The caller then decides how to
handle the error.

As part of this patch we also validate certain MSR accesses at the point
where we determine which register is being written, rather than postponing
those checks until later.
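
To illustrate the new contract (a minimal sketch, not part of the diff
below; the gpf label and TRAP_gp_fault come from the svm.c hunk):

    /* HVM intercept side: a non-zero return from the VPMU handler now
     * means "this access faults", so the caller injects #GP itself
     * rather than the handler calling hvm_inject_hw_exception(). */
    if ( vpmu_do_wrmsr(msr, msr_content, 0) )
        goto gpf;    /* leads to hvm_inject_hw_exception(TRAP_gp_fault, 0) */

PV callers, which cannot use hvm_inject_hw_exception(), can instead act on
the returned error code directly.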
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
---
Changes in v25:
* Updated commit message to mention reason for changing vpmu_do_msr return values
 xen/arch/x86/hvm/svm/svm.c        |  6 ++-
 xen/arch/x86/hvm/svm/vpmu.c       |  6 +--
 xen/arch/x86/hvm/vmx/vmx.c        | 24 ++++++++---
 xen/arch/x86/hvm/vmx/vpmu_core2.c | 86 +++++++++++++++------------------------
 4 files changed, 57 insertions(+), 65 deletions(-)
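
For readers skimming the hunks, a before/after sketch of the handler-side
convention (illustrative only; condensed from the vpmu_core2.c changes
below):

    /* Before: boolean "handled"; the handler injected #GP itself. */
    hvm_inject_hw_exception(TRAP_gp_fault, 0);
    return 1;

    /* After: 0 on success, -EINVAL on a bad access; no injection here,
     * the caller decides how to report the fault. */
    return -EINVAL;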
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 680eebe..3b5d15d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1708,7 +1708,8 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
case MSR_AMD_FAM15H_EVNTSEL3:
case MSR_AMD_FAM15H_EVNTSEL4:
case MSR_AMD_FAM15H_EVNTSEL5:
- vpmu_do_rdmsr(msr, msr_content);
+ if ( vpmu_do_rdmsr(msr, msr_content) )
+ goto gpf;
break;
case MSR_AMD64_DR0_ADDRESS_MASK:
@@ -1859,7 +1860,8 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
case MSR_AMD_FAM15H_EVNTSEL3:
case MSR_AMD_FAM15H_EVNTSEL4:
case MSR_AMD_FAM15H_EVNTSEL5:
- vpmu_do_wrmsr(msr, msr_content, 0);
+ if ( vpmu_do_wrmsr(msr, msr_content, 0) )
+ goto gpf;
break;
case MSR_IA32_MCx_MISC(4): /* Threshold register */
diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index a8572a6..74d03a5 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -305,7 +305,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
{
if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
- return 1;
+ return 0;
vpmu_set(vpmu, VPMU_RUNNING);
if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
@@ -335,7 +335,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
/* Write to hw counters */
wrmsrl(msr, msr_content);
- return 1;
+ return 0;
}
static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
@@ -353,7 +353,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
rdmsrl(msr, *msr_content);
- return 1;
+ return 0;
}
static void amd_vpmu_destroy(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 50e11dd..db1fa82 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2166,12 +2166,17 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
*msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
/* Perhaps vpmu will change some bits. */
+ /* FALLTHROUGH */
+ case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
+ case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
+ case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+ case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ case MSR_IA32_PEBS_ENABLE:
+ case MSR_IA32_DS_AREA:
if ( vpmu_do_rdmsr(msr, msr_content) )
- goto done;
+ goto gp_fault;
break;
default:
- if ( vpmu_do_rdmsr(msr, msr_content) )
- break;
if ( passive_domain_do_rdmsr(msr, msr_content) )
goto done;
switch ( long_mode_do_msr_read(msr, msr_content) )
@@ -2347,7 +2352,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
if ( msr_content & ~supported )
{
/* Perhaps some other bits are supported in vpmu. */
- if ( !vpmu_do_wrmsr(msr, msr_content, supported) )
+ if ( vpmu_do_wrmsr(msr, msr_content, supported) )
break;
}
if ( msr_content & IA32_DEBUGCTLMSR_LBR )
@@ -2375,9 +2380,16 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
if ( !nvmx_msr_write_intercept(msr, msr_content) )
goto gp_fault;
break;
+ case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
+ case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(7):
+ case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+ case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ case MSR_IA32_PEBS_ENABLE:
+ case MSR_IA32_DS_AREA:
+ if ( vpmu_do_wrmsr(msr, msr_content, 0) )
+ goto gp_fault;
+ break;
default:
- if ( vpmu_do_wrmsr(msr, msr_content, 0) )
- return X86EMUL_OKAY;
if ( passive_domain_do_wrmsr(msr, msr_content) )
return X86EMUL_OKAY;
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index e7642e5..9710149 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -454,36 +454,41 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
IA32_DEBUGCTLMSR_BTS_OFF_USR;
if ( !(msr_content & ~supported) &&
vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
- return 1;
+ return 0;
if ( (msr_content & supported) &&
!vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
printk(XENLOG_G_WARNING
"%pv: Debug Store unsupported on this CPU\n",
current);
}
- return 0;
+ return -EINVAL;
}
ASSERT(!supported);
+ if ( type == MSR_TYPE_COUNTER &&
+ (msr_content &
+ ~((1ull << core2_get_bitwidth_fix_count()) - 1)) )
+ /* Writing unsupported bits to a fixed counter */
+ return -EINVAL;
+
core2_vpmu_cxt = vpmu->context;
enabled_cntrs = vpmu->priv_context;
switch ( msr )
{
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
core2_vpmu_cxt->global_status &= ~msr_content;
- return 1;
+ return 0;
case MSR_CORE_PERF_GLOBAL_STATUS:
gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
"MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
- return 1;
+ return -EINVAL;
case MSR_IA32_PEBS_ENABLE:
if ( msr_content & 1 )
gdprintk(XENLOG_WARNING, "Guest is trying to enable PEBS, "
"which is not supported.\n");
core2_vpmu_cxt->pebs_enable = msr_content;
- return 1;
+ return 0;
case MSR_IA32_DS_AREA:
if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) )
{
@@ -492,18 +497,21 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
gdprintk(XENLOG_WARNING,
"Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n",
msr_content);
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
- return 1;
+ return -EINVAL;
}
core2_vpmu_cxt->ds_area = msr_content;
break;
}
gdprintk(XENLOG_WARNING, "Guest setting of DTS is ignored.\n");
- return 1;
+ return 0;
case MSR_CORE_PERF_GLOBAL_CTRL:
global_ctrl = msr_content;
break;
case MSR_CORE_PERF_FIXED_CTR_CTRL:
+ if ( msr_content &
+ ( ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1)) )
+ return -EINVAL;
+
vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
*enabled_cntrs &= ~(((1ULL << fixed_pmc_cnt) - 1) << 32);
if ( msr_content != 0 )
@@ -526,6 +534,9 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
+ if ( msr_content & (~((1ull << 32) - 1)) )
+ return -EINVAL;
+
vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
if ( msr_content & (1ULL << 22) )
@@ -537,45 +548,17 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
}
}
+ if ( type != MSR_TYPE_GLOBAL )
+ wrmsrl(msr, msr_content);
+ else
+ vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+
if ( (global_ctrl & *enabled_cntrs) || (core2_vpmu_cxt->ds_area != 0) )
vpmu_set(vpmu, VPMU_RUNNING);
else
vpmu_reset(vpmu, VPMU_RUNNING);
- if ( type != MSR_TYPE_GLOBAL )
- {
- u64 mask;
- int inject_gp = 0;
- switch ( type )
- {
- case MSR_TYPE_ARCH_CTRL: /* MSR_P6_EVNTSEL[0,...] */
- mask = ~((1ull << 32) - 1);
- if (msr_content & mask)
- inject_gp = 1;
- break;
- case MSR_TYPE_CTRL: /* IA32_FIXED_CTR_CTRL */
- if ( msr == MSR_IA32_DS_AREA )
- break;
- /* 4 bits per counter, currently 3 fixed counters implemented. */
- mask = ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1);
- if (msr_content & mask)
- inject_gp = 1;
- break;
- case MSR_TYPE_COUNTER: /* IA32_FIXED_CTR[0-2] */
- mask = ~((1ull << core2_get_bitwidth_fix_count()) - 1);
- if (msr_content & mask)
- inject_gp = 1;
- break;
- }
- if (inject_gp)
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
- else
- wrmsrl(msr, msr_content);
- }
- else
- vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
-
- return 1;
+ return 0;
}
static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
@@ -603,19 +586,14 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
rdmsrl(msr, *msr_content);
}
}
- else
+ else if ( msr == MSR_IA32_MISC_ENABLE )
{
/* Extension for BTS */
- if ( msr == MSR_IA32_MISC_ENABLE )
- {
- if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
- *msr_content &= ~MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
- }
- else
- return 0;
+ if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
+ *msr_content &= ~MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
}
- return 1;
+ return 0;
}
static void core2_vpmu_do_cpuid(unsigned int input,
@@ -760,9 +738,9 @@ static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
int type = -1, index = -1;
if ( !is_core2_vpmu_msr(msr, &type, &index) )
- return 0;
+ return -EINVAL;
*msr_content = 0;
- return 1;
+ return 0;
}
/*
--
1.8.1.4