From: drjones@redhat.com (Andrew Jones)
To: linux-arm-kernel@lists.infradead.org
Subject: [PATCH v10 15/21] KVM: ARM64: Add access handler for PMUSERENR register
Date: Thu, 28 Jan 2016 20:58:14 +0100 [thread overview]
Message-ID: <20160128195814.GF16453@hawk.localdomain> (raw)
In-Reply-To: <1453866709-20324-16-git-send-email-zhaoshenglong@huawei.com>
On Wed, Jan 27, 2016 at 11:51:43AM +0800, Shannon Zhao wrote:
> From: Shannon Zhao <shannon.zhao@linaro.org>
>
> This register resets as unknown in 64bit mode while it resets as zero
> in 32bit mode. Here we choose to reset it as zero for consistency.
>
> PMUSERENR_EL0 holds some bits which decide whether PMU registers can be
> accessed from EL0. Add some check helpers to handle the access from EL0.
>
> When these bits are zero, only reading PMUSERENR will trap to EL2 and
> writing PMUSERENR or reading/writing other PMU registers will trap to
> EL1 rather than EL2 when HCR.TGE==0. With the current KVM configuration
> (HCR.TGE==0) there is no way to get these traps. Here we write 0xf to
> physical PMUSERENR register on VM entry, so that it will trap PMU access
> from EL0 to EL2. Within the register access handler we check the real
> value of guest PMUSERENR register to decide whether this access is
> allowed. If not allowed, return false to inject UND to guest.
>
> Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
> ---
> arch/arm64/include/asm/pmu.h | 9 ++++
> arch/arm64/kvm/hyp/hyp.h | 1 +
> arch/arm64/kvm/hyp/switch.c | 3 ++
> arch/arm64/kvm/sys_regs.c | 100 ++++++++++++++++++++++++++++++++++++++++---
> 4 files changed, 107 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h
> index 6f14a01..eb3dc88 100644
> --- a/arch/arm64/include/asm/pmu.h
> +++ b/arch/arm64/include/asm/pmu.h
> @@ -69,4 +69,13 @@
> #define ARMV8_EXCLUDE_EL0 (1 << 30)
> #define ARMV8_INCLUDE_EL2 (1 << 27)
>
> +/*
> + * PMUSERENR: user enable reg
> + */
> +#define ARMV8_USERENR_MASK 0xf /* Mask for writable bits */
> +#define ARMV8_USERENR_EN	(1 << 0) /* PMU regs can be accessed at EL0 */
> +#define ARMV8_USERENR_SW	(1 << 1) /* PMSWINC can be written at EL0 */
> +#define ARMV8_USERENR_CR	(1 << 2) /* Cycle counter can be read at EL0 */
> +#define ARMV8_USERENR_ER	(1 << 3) /* Event counter can be read at EL0 */
> +
> #endif /* __ASM_PMU_H */
> diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
> index fb27517..9a28b7bd8 100644
> --- a/arch/arm64/kvm/hyp/hyp.h
> +++ b/arch/arm64/kvm/hyp/hyp.h
> @@ -22,6 +22,7 @@
> #include <linux/kvm_host.h>
> #include <asm/kvm_mmu.h>
> #include <asm/sysreg.h>
> +#include <asm/pmu.h>
>
> #define __hyp_text __section(.hyp.text) notrace
>
> diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
> index ca8f5a5..1a7d679 100644
> --- a/arch/arm64/kvm/hyp/switch.c
> +++ b/arch/arm64/kvm/hyp/switch.c
> @@ -37,6 +37,8 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
> /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
> write_sysreg(1 << 15, hstr_el2);
> write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
> + /* Make sure we trap PMU access from EL0 to EL2 */
> + write_sysreg(ARMV8_USERENR_MASK, pmuserenr_el0);
> write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
> }
>
> @@ -45,6 +47,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
> write_sysreg(HCR_RW, hcr_el2);
> write_sysreg(0, hstr_el2);
> write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
> + write_sysreg(0, pmuserenr_el0);
> write_sysreg(0, cptr_el2);
> }
>
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index eefc60a..084e527 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -453,6 +453,37 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> vcpu_sys_reg(vcpu, PMCR_EL0) = val;
> }
>
> +static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
> +{
> + u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
> +
> + return !((reg & ARMV8_USERENR_EN) || vcpu_mode_priv(vcpu));
> +}
> +
> +static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
> +{
> + u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
> +
> + return !((reg & (ARMV8_USERENR_SW | ARMV8_USERENR_EN))
> + || vcpu_mode_priv(vcpu));
> +}
> +
> +static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
> +{
> + u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
> +
> + return !((reg & (ARMV8_USERENR_CR | ARMV8_USERENR_EN))
> + || vcpu_mode_priv(vcpu));
> +}
> +
> +static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
> +{
> + u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
> +
> + return !((reg & (ARMV8_USERENR_ER | ARMV8_USERENR_EN))
> + || vcpu_mode_priv(vcpu));
> +}
> +
> static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> const struct sys_reg_desc *r)
> {
> @@ -461,6 +492,9 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> if (!kvm_arm_pmu_v3_ready(vcpu))
> return trap_raz_wi(vcpu, p, r);
>
> + if (pmu_access_el0_disabled(vcpu))
> + return false;
Based on the function name I'm not sure I like embedding vcpu_mode_priv.
It seems a condition like
if (!vcpu_mode_priv(vcpu) && !pmu_access_el0_enabled(vcpu))
return false;
would be more clear here and the other callsites below. (I also prefer
checking for enabled vs. disabled)
> +
> if (p->is_write) {
> /* Only update writeable bits of PMCR */
> val = vcpu_sys_reg(vcpu, PMCR_EL0);
> @@ -484,6 +518,9 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> if (!kvm_arm_pmu_v3_ready(vcpu))
> return trap_raz_wi(vcpu, p, r);
>
> + if (pmu_access_event_counter_el0_disabled(vcpu))
> + return false;
> +
> if (p->is_write)
> vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
> else
> @@ -501,7 +538,7 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> if (!kvm_arm_pmu_v3_ready(vcpu))
> return trap_raz_wi(vcpu, p, r);
>
> - if (p->is_write)
> + if (p->is_write || pmu_access_el0_disabled(vcpu))
> return false;
>
> if (!(p->Op2 & 1))
> @@ -534,6 +571,9 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> if (!kvm_arm_pmu_v3_ready(vcpu))
> return trap_raz_wi(vcpu, p, r);
>
> + if (pmu_access_el0_disabled(vcpu))
> + return false;
> +
> if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
> /* PMXEVTYPER_EL0 */
> idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_COUNTER_MASK;
> @@ -574,11 +614,17 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
> if (r->CRn == 9 && r->CRm == 13) {
> if (r->Op2 == 2) {
> /* PMXEVCNTR_EL0 */
> + if (pmu_access_event_counter_el0_disabled(vcpu))
> + return false;
> +
> idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
> & ARMV8_COUNTER_MASK;
> reg = PMEVCNTR0_EL0 + idx;
> } else if (r->Op2 == 0) {
> /* PMCCNTR_EL0 */
> + if (pmu_access_cycle_counter_el0_disabled(vcpu))
> + return false;
> +
> idx = ARMV8_CYCLE_IDX;
> reg = PMCCNTR_EL0;
> } else {
> @@ -586,6 +632,9 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
> }
> } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
> /* PMEVCNTRn_EL0 */
> + if (pmu_access_event_counter_el0_disabled(vcpu))
> + return false;
> +
> idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
> reg = PMEVCNTR0_EL0 + idx;
> } else {
> @@ -596,10 +645,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
> return false;
>
> val = kvm_pmu_get_counter_value(vcpu, idx);
> - if (p->is_write)
> + if (p->is_write) {
> + if (pmu_access_el0_disabled(vcpu))
> + return false;
> +
This check isn't necessary because at this point we've either already
checked ARMV8_USERENR_EN with one of the other tests, or we've BUGed.
> vcpu_sys_reg(vcpu, reg) += (s64)p->regval - val;
> - else
> + } else {
> p->regval = val;
> + }
It's nasty to have to add 3 checks to access_pmu_evcntr. Can we instead
just have another helper that takes a reg_idx argument, i.e.
static bool pmu_reg_access_el0_disabled(struct kvm_vcpu *vcpu, u64 idx)
{
if (idx == PMCCNTR_EL0)
return pmu_access_cycle_counter_el0_disabled
if (idx >= PMEVCNTR0_EL0 && idx <= PMEVCNTR30_EL0)
return pmu_access_event_counter_el0_disabled
...
and call it once after the pmu_counter_idx_valid check?
>
> return true;
> }
> @@ -612,6 +665,9 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> if (!kvm_arm_pmu_v3_ready(vcpu))
> return trap_raz_wi(vcpu, p, r);
>
> + if (pmu_access_el0_disabled(vcpu))
> + return false;
> +
> mask = kvm_pmu_valid_counter_mask(vcpu);
> if (p->is_write) {
> val = p->regval & mask;
> @@ -639,6 +695,9 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> if (!kvm_arm_pmu_v3_ready(vcpu))
> return trap_raz_wi(vcpu, p, r);
>
> + if (!vcpu_mode_priv(vcpu))
> + return false;
> +
> if (p->is_write) {
> if (r->Op2 & 0x1)
> /* accessing PMINTENSET_EL1 */
> @@ -663,6 +722,9 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> if (!kvm_arm_pmu_v3_ready(vcpu))
> return trap_raz_wi(vcpu, p, r);
>
> + if (pmu_access_el0_disabled(vcpu))
> + return false;
> +
> if (p->is_write) {
> if (r->CRm & 0x2)
> /* accessing PMOVSSET_EL0 */
> @@ -685,6 +747,9 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> if (!kvm_arm_pmu_v3_ready(vcpu))
> return trap_raz_wi(vcpu, p, r);
>
> + if (pmu_write_swinc_el0_disabled(vcpu))
> + return false;
> +
> if (p->is_write) {
> mask = kvm_pmu_valid_counter_mask(vcpu);
> kvm_pmu_software_increment(vcpu, p->regval & mask);
> @@ -694,6 +759,26 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> return false;
> }
>
> +static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> + const struct sys_reg_desc *r)
> +{
> + if (!kvm_arm_pmu_v3_ready(vcpu))
> + return trap_raz_wi(vcpu, p, r);
> +
> + if (p->is_write) {
> + if (!vcpu_mode_priv(vcpu))
> + return false;
> +
> + vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
> + & ARMV8_USERENR_MASK;
> + } else {
> + p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
> + & ARMV8_USERENR_MASK;
> + }
> +
> + return true;
> +}
> +
> /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
> #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
> /* DBGBVRn_EL1 */ \
> @@ -923,9 +1008,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
> /* PMXEVCNTR_EL0 */
> { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
> access_pmu_evcntr },
> - /* PMUSERENR_EL0 */
> + /* PMUSERENR_EL0
> + * This register resets as unknown in 64bit mode while it resets as zero
> + * in 32bit mode. Here we choose to reset it as zero for consistency.
> + */
> { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
> - trap_raz_wi },
> + access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
> /* PMOVSSET_EL0 */
> { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
> access_pmovs, reset_unknown, PMOVSSET_EL0 },
> @@ -1250,7 +1338,7 @@ static const struct sys_reg_desc cp15_regs[] = {
> { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
> { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
> { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
> - { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
> + { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
> { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
> { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
> { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
> --
> 2.0.4
>
>
> --
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
next prev parent reply other threads:[~2016-01-28 19:58 UTC|newest]
Thread overview: 51+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-01-27 3:51 [PATCH v10 00/21] KVM: ARM64: Add guest PMU support Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 01/21] ARM64: Move PMU register related defines to asm/pmu.h Shannon Zhao
2016-02-10 10:36 ` Will Deacon
2016-01-27 3:51 ` [PATCH v10 02/21] KVM: ARM64: Define PMU data structure for each vcpu Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 03/21] KVM: ARM64: Add offset defines for PMU registers Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 04/21] KVM: ARM64: Add access handler for PMCR register Shannon Zhao
2016-01-28 15:36 ` Andrew Jones
2016-01-28 20:43 ` Andrew Jones
2016-01-29 2:07 ` Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 05/21] KVM: ARM64: Add access handler for PMSELR register Shannon Zhao
2016-01-28 20:10 ` Andrew Jones
2016-01-27 3:51 ` [PATCH v10 06/21] KVM: ARM64: Add access handler for PMCEID0 and PMCEID1 register Shannon Zhao
2016-01-28 20:34 ` Andrew Jones
2016-01-29 3:47 ` Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 07/21] KVM: ARM64: PMU: Add perf event map and introduce perf event creating function Shannon Zhao
2016-01-28 16:31 ` Andrew Jones
2016-01-28 16:45 ` Marc Zyngier
2016-01-28 18:06 ` Will Deacon
2016-01-29 6:14 ` Shannon Zhao
2016-01-29 6:26 ` Shannon Zhao
2016-01-29 10:18 ` Will Deacon
2016-01-29 13:11 ` Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 08/21] KVM: ARM64: Add access handler for event type register Shannon Zhao
2016-01-28 20:11 ` Andrew Jones
2016-01-29 1:42 ` Shannon Zhao
2016-01-29 11:25 ` Andrew Jones
2016-01-27 3:51 ` [PATCH v10 09/21] KVM: ARM64: Add access handler for event counter register Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 10/21] KVM: ARM64: Add access handler for PMCNTENSET and PMCNTENCLR register Shannon Zhao
2016-01-28 18:08 ` Andrew Jones
2016-01-28 18:12 ` Andrew Jones
2016-01-27 3:51 ` [PATCH v10 11/21] KVM: ARM64: Add access handler for PMINTENSET and PMINTENCLR register Shannon Zhao
2016-01-28 18:18 ` Andrew Jones
2016-01-27 3:51 ` [PATCH v10 12/21] KVM: ARM64: Add access handler for PMOVSSET and PMOVSCLR register Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 13/21] KVM: ARM64: Add access handler for PMSWINC register Shannon Zhao
2016-01-28 18:37 ` Andrew Jones
2016-01-27 3:51 ` [PATCH v10 14/21] KVM: ARM64: Add helper to handle PMCR register bits Shannon Zhao
2016-01-28 19:15 ` Andrew Jones
2016-01-27 3:51 ` [PATCH v10 15/21] KVM: ARM64: Add access handler for PMUSERENR register Shannon Zhao
2016-01-28 19:58 ` Andrew Jones [this message]
2016-01-29 7:37 ` Shannon Zhao
2016-01-29 11:08 ` Andrew Jones
2016-01-29 13:17 ` Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 16/21] KVM: ARM64: Add PMU overflow interrupt routing Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 17/21] KVM: ARM64: Reset PMU state when resetting vcpu Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 18/21] KVM: ARM64: Free perf event of PMU when destroying vcpu Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 19/21] KVM: ARM64: Add a new feature bit for PMUv3 Shannon Zhao
2016-01-28 20:54 ` Andrew Jones
2016-01-27 3:51 ` [PATCH v10 20/21] KVM: ARM: Introduce per-vcpu kvm device controls Shannon Zhao
2016-01-27 3:51 ` [PATCH v10 21/21] KVM: ARM64: Add a new vcpu device control group for PMUv3 Shannon Zhao
2016-01-28 21:12 ` Andrew Jones
2016-01-28 21:30 ` [PATCH v10 00/21] KVM: ARM64: Add guest PMU support Andrew Jones
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20160128195814.GF16453@hawk.localdomain \
--to=drjones@redhat.com \
--cc=linux-arm-kernel@lists.infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).